Index: head/sys/dev/spibus/spigen.c
===================================================================
--- head/sys/dev/spibus/spigen.c
+++ head/sys/dev/spibus/spigen.c
@@ -325,8 +325,9 @@
 	vm_object_reference_locked(mmap->bufobj); // kernel and userland both
 	for (n = 0; n < pages; n++) {
 		m[n] = vm_page_grab(mmap->bufobj, n,
-		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
-		m[n]->valid = VM_PAGE_BITS_ALL;
+		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
+		vm_page_valid(m[n]);
+		vm_page_xunbusy(m[n]);
 	}
 	VM_OBJECT_WUNLOCK(mmap->bufobj);
 	pmap_qenter(mmap->kvaddr, m, pages);
Index: head/sys/kern/kern_kcov.c
===================================================================
--- head/sys/kern/kern_kcov.c
+++ head/sys/kern/kern_kcov.c
@@ -383,8 +383,9 @@
 	VM_OBJECT_WLOCK(info->bufobj);
 	for (n = 0; n < pages; n++) {
 		m = vm_page_grab(info->bufobj, n,
-		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
-		m->valid = VM_PAGE_BITS_ALL;
+		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1);
 	}
 	VM_OBJECT_WUNLOCK(info->bufobj);
Index: head/sys/kern/kern_sendfile.c
===================================================================
--- head/sys/kern/kern_sendfile.c
+++ head/sys/kern/kern_sendfile.c
@@ -388,7 +388,7 @@
 		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
 		    &a)) {
 			pmap_zero_page(pa[i]);
-			pa[i]->valid = VM_PAGE_BITS_ALL;
+			vm_page_valid(pa[i]);
 			MPASS(pa[i]->dirty == 0);
 			vm_page_xunbusy(pa[i]);
 			i++;
Index: head/sys/vm/vm_glue.c
===================================================================
--- head/sys/vm/vm_glue.c
+++ head/sys/vm/vm_glue.c
@@ -340,10 +340,12 @@
 	 * page of stack.
 	 */
 	VM_OBJECT_WLOCK(ksobj);
-	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
-	    VM_ALLOC_WIRED, ma, pages);
-	for (i = 0; i < pages; i++)
-		ma[i]->valid = VM_PAGE_BITS_ALL;
+	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED,
+	    ma, pages);
+	for (i = 0; i < pages; i++) {
+		vm_page_valid(ma[i]);
+		vm_page_xunbusy(ma[i]);
+	}
 	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(ks, ma, pages);
 	*ksobjp = ksobj;
Index: head/sys/vm/vm_kern.c
===================================================================
--- head/sys/vm/vm_kern.c
+++ head/sys/vm/vm_kern.c
@@ -193,7 +193,7 @@
 	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	pflags |= VM_ALLOC_NOWAIT;
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
@@ -223,7 +223,8 @@
 		    vm_phys_domain(m), domain));
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 	}
@@ -284,7 +285,7 @@
 	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	pflags |= VM_ALLOC_NOWAIT;
 	npages = atop(size);
@@ -315,7 +316,8 @@
 	for (; m < end_m; m++) {
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
 		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
 		tmp += PAGE_SIZE;
@@ -465,7 +467,7 @@
 	    ("kmem_back_domain: only supports kernel object."));
 
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	if (flags & M_WAITOK)
 		pflags |= VM_ALLOC_WAITFAIL;
@@ -498,7 +500,8 @@
 			pmap_zero_page(m);
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 #if VM_NRESERVLEVEL > 0
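
All of the hunks above converge on the same idiom: with VM_ALLOC_NOBUSY dropped, vm_page_grab() and vm_page_grab_pages() return pages exclusively busied, so the caller marks each page fully valid with vm_page_valid() and then releases the busy lock explicitly with vm_page_xunbusy() before mapping it. The sketch below illustrates that idiom in isolation; the function name, object, and fixed-size page array are hypothetical and only mirror the spigen/kcov callers, they are not part of the patch.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>

/*
 * Hypothetical helper showing the post-change allocation pattern: grab
 * wired, zero-filled pages from a VM object, mark them valid, drop the
 * exclusive busy lock that vm_page_grab() now leaves held, and map the
 * run into a kernel virtual address range.
 */
static void
example_wire_and_map(vm_object_t obj, vm_offset_t kva, int pages)
{
	vm_page_t m[16];	/* assumes pages <= nitems(m) */
	int n;

	VM_OBJECT_WLOCK(obj);
	for (n = 0; n < pages; n++) {
		/* Without VM_ALLOC_NOBUSY the page comes back xbusied. */
		m[n] = vm_page_grab(obj, n, VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		/* Replaces the old direct m->valid = VM_PAGE_BITS_ALL store. */
		vm_page_valid(m[n]);
		/* Release the exclusive busy lock taken by vm_page_grab(). */
		vm_page_xunbusy(m[n]);
	}
	VM_OBJECT_WUNLOCK(obj);
	pmap_qenter(kva, m, pages);
}

The vm_kern.c paths follow the same pattern, except that each page is entered individually with pmap_enter() rather than batched through pmap_qenter(); the vm_page_valid()/vm_page_xunbusy() pair still precedes the mapping call.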