Index: sys/vm/vm_glue.c
===================================================================
--- sys/vm/vm_glue.c
+++ sys/vm/vm_glue.c
@@ -342,10 +342,8 @@
 	VM_OBJECT_WLOCK(ksobj);
 	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED,
 	    ma, pages);
-	for (i = 0; i < pages; i++) {
+	for (i = 0; i < pages; i++)
 		vm_page_valid(ma[i]);
-		vm_page_xunbusy(ma[i]);
-	}
 	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(ks, ma, pages);
 	*ksobjp = ksobj;
@@ -365,7 +363,7 @@
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("%s: kstack already missing?", __func__);
-		vm_page_busy_acquire(m, 0);
+		vm_page_xbusy_claim(m);
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
 	}
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -224,7 +224,6 @@
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 	}
@@ -317,7 +316,6 @@
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
 		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
 		tmp += PAGE_SIZE;
@@ -501,7 +499,6 @@
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 #if VM_NRESERVLEVEL > 0
@@ -591,7 +588,7 @@
 #endif
 	for (; offset < end; offset += PAGE_SIZE, m = next) {
 		next = vm_page_next(m);
-		vm_page_busy_acquire(m, 0);
+		vm_page_xbusy_claim(m);
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
 	}
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -762,9 +762,14 @@
 void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
 	vm_page_assert_pga_writeable(m, bits)
+#define	vm_page_xbusy_claim(m) do {					\
+	vm_page_assert_xbusied_unchecked((m));				\
+	(m)->busy_lock = VPB_CURTHREAD_EXCLUSIVE;			\
+} while (0)
 #else
 #define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
+#define	vm_page_xbusy_claim(m)
 #endif
 
 #if BYTE_ORDER == BIG_ENDIAN
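
Commentary on the change (not part of the diff): with the vm_page_xunbusy() calls removed, pages allocated by the kmem_* allocators and the kernel-stack code remain exclusively busied for their whole lifetime, and the xbusy lock is only taken over again on the free path. vm_page_xbusy_claim() exists for the INVARIANTS build: it asserts that the page is xbusied and rewrites busy_lock to record curthread as the owner, so the ownership assertions in vm_page_free() pass; without INVARIANTS it expands to nothing, since the busy state never actually changes hands at runtime. The plain store (rather than the atomic loop in vm_page_busy_acquire()) relies, as I read it, on these being unmanaged kernel pages with no concurrent busy waiters. Below is a minimal sketch of the resulting free-side pattern, mirroring the loops in the diff; the helper name kmem_free_one_page() is hypothetical.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Sketch only: free one wired, xbusied page previously handed out by a
 * kmem_* allocator.  This mirrors the free loops in the diff above.
 */
static void
kmem_free_one_page(vm_page_t m)
{
	vm_page_xbusy_claim(m);	/* record curthread as the xbusy owner */
	vm_page_unwire_noq(m);	/* drop the allocation-time wiring */
	vm_page_free(m);	/* frees the page and releases the busy lock */
}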