Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c
+++ sys/amd64/amd64/mp_machdep.c
@@ -305,8 +305,7 @@
 	oa = (vm_offset_t)&__pcpu[cpuid];
 	if (vm_phys_domain(pmap_kextract(oa)) == domain)
 		return;
-	m = vm_page_alloc_domain(NULL, 0, domain,
-	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc_noobj_domain(domain, 0);
 	if (m == NULL)
 		return;
 	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -2342,10 +2342,9 @@
 		highest = start + (s / sizeof(*pvd)) - 1;
 
 		for (j = 0; j < s; j += PAGE_SIZE) {
-			vm_page_t m = vm_page_alloc_domain(NULL, 0,
-			    domain, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
+			vm_page_t m = vm_page_alloc_noobj_domain(domain, 0);
 
 			if (m == NULL)
-				panic("vm_page_alloc_domain failed for %lx\n", (vm_offset_t)pvd + j);
+				panic("failed to allocate PV table page");
 			pmap_qenter((vm_offset_t)pvd + j, &m, 1);
 		}
@@ -4311,15 +4310,11 @@
 {
 	vm_page_t m;
 
-	m = vm_page_alloc(NULL, pindex, flags | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc_noobj(flags);
 	if (__predict_false(m == NULL))
 		return (NULL);
-
+	m->pindex = pindex;
 	pmap_pt_page_count_adj(pmap, 1);
-
-	if ((flags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
-
 	return (m);
 }
 
@@ -4357,8 +4352,8 @@
 	/*
 	 * allocate the page directory page
 	 */
-	pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_NORMAL |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
+	pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_WIRED | VM_ALLOC_ZERO |
+	    VM_ALLOC_WAITOK);
 	pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg);
 	pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys);
@@ -4388,8 +4383,7 @@
 		pmap_pinit_pml4(pmltop_pg);
 		if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
 			pmltop_pgu = pmap_alloc_pt_page(NULL, 0,
-			    VM_ALLOC_WIRED | VM_ALLOC_NORMAL |
-			    VM_ALLOC_WAITOK);
+			    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
 			pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP(
 			    VM_PAGE_TO_PHYS(pmltop_pgu));
 			if (pmap_is_la57(pmap))
@@ -5479,8 +5473,7 @@
 		}
 	}
 	/* No free items, allocate another chunk */
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED);
+	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 	if (m == NULL) {
 		if (lockp == NULL) {
 			PV_STAT(counter_u64_add(pc_chunk_tryfail, 1));
@@ -5583,8 +5576,7 @@
 			break;
 	}
 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
-		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_WIRED);
+		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
@@ -5956,8 +5948,7 @@
 		 * priority is normal.
 		 */
 		mpte = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va),
-		    (in_kernel ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
-		    VM_ALLOC_WIRED);
+		    (in_kernel ? VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED);
 
 		/*
 		 * If the allocation of the new page table page fails,
@@ -10337,8 +10328,7 @@
 static vm_page_t
 pmap_large_map_getptp_unlocked(void)
 {
-	return (pmap_alloc_pt_page(kernel_pmap, 0,
-	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO));
+	return (pmap_alloc_pt_page(kernel_pmap, 0, VM_ALLOC_ZERO));
 }
 
 static vm_page_t
@@ -11393,12 +11383,10 @@
 {
 	vm_page_t m;
 
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+	m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
+	    VM_ALLOC_ZERO);
 	if (m == NULL)
 		panic("%s: no memory to grow shadow map", __func__);
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
 	return (m);
 }
 
@@ -11463,12 +11451,10 @@
 {
 	vm_page_t m;
 
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+	m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
+	    VM_ALLOC_ZERO);
 	if (m == NULL)
 		panic("%s: no memory to grow shadow map", __func__);
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
 	return (m);
 }
Index: sys/amd64/amd64/uma_machdep.c
===================================================================
--- sys/amd64/amd64/uma_machdep.c
+++ sys/amd64/amd64/uma_machdep.c
@@ -49,16 +49,14 @@
 	void *va;
 
 	*flags = UMA_SLAB_PRIV;
-	m = vm_page_alloc_domain(NULL, 0, domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+	    VM_ALLOC_WIRED);
 	if (m == NULL)
 		return (NULL);
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
 	va = (void *)PHYS_TO_DMAP(pa);
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		pagezero(va);
 	return (va);
 }
 
Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c
+++ sys/arm/arm/pmap-v6.c
@@ -2069,12 +2069,12 @@
 			/*
 			 * Install new PT2s page into kernel PT2TAB.
 			 */
-			m = vm_page_alloc(NULL,
-			    pte1_index(kernel_vm_end) & ~PT2PG_MASK,
-			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+			m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 			if (m == NULL)
 				panic("%s: no memory to grow kernel", __func__);
+			m->pindex = pte1_index(kernel_vm_end) & ~PT2PG_MASK;
+
 			/*
 			 * QQQ: To link all new L2 page tables from L1 page
 			 *      table now and so pmap_kenter_pte1() them
@@ -2488,8 +2487,7 @@
 		/*
 		 * Install new PT2s page into pmap PT2TAB.
 		 */
-		m = vm_page_alloc(NULL, pte1_idx & ~PT2PG_MASK,
-		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+		m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (m == NULL) {
 			if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 				PMAP_UNLOCK(pmap);
@@ -2505,6 +2504,7 @@
 			 */
 			return (NULL);
 		}
+		m->pindex = pte1_idx & ~PT2PG_MASK;
 		pmap->pm_stats.resident_count++;
 		pt2pg_pa = pmap_pt2pg_init(pmap, va, m);
 	} else {
@@ -3062,8 +3062,8 @@
 	 * global lock.  If "pv_vafree" is currently non-empty, it will
 	 * remain non-empty until pmap_pte2list_alloc() completes.
 	 */
-	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+	if (pv_vafree == 0 ||
+	    (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
 		if (try) {
 			pv_entry_count--;
 			PV_STAT(pc_chunk_tryfail++);
@@ -3711,9 +3710,8 @@
 		 * "failure" if the mapping was never accessed or the
 		 * allocation of the new page table page fails.
 		 */
-		if ((opte1 & PTE1_A) == 0 || (m = vm_page_alloc(NULL,
-		    pte1_index(va) & ~PT2PG_MASK, VM_ALLOC_NOOBJ |
-		    VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) {
+		if ((opte1 & PTE1_A) == 0 ||
+		    (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
 			SLIST_INIT(&free);
 			pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free);
 			vm_page_free_pages_toq(&free, false);
@@ -3721,6 +3720,7 @@
 			    __func__, va, pmap);
 			return (FALSE);
 		}
+		m->pindex = pte1_index(va) & ~PT2PG_MASK;
 
 		if (va < VM_MAXUSER_ADDRESS)
 			pmap->pm_stats.resident_count++;
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1765,16 +1765,11 @@
 	/*
 	 * allocate the l0 page
 	 */
-	while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
-		vm_wait(NULL);
-
+	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
+	    VM_ALLOC_ZERO);
 	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
 
-	if ((m->flags & PG_ZERO) == 0)
-		pagezero(pmap->pm_l0);
-
 	pmap->pm_root.rt_root = 0;
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 	pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
@@ -1840,8 +1835,7 @@
 	/*
 	 * Allocate a page table page.
 	 */
-	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		if (lockp != NULL) {
 			RELEASE_PV_LIST_LOCK(lockp);
 			PMAP_UNLOCK(pmap);
@@ -1855,8 +1849,7 @@
 		 */
 		return (NULL);
 	}
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
+	m->pindex = ptepindex;
 
 	/*
 	 * Because of AArch64's weak memory consistency model, we must have a
@@ -2190,13 +2183,11 @@
 		l1 = pmap_l0_to_l1(l0, kernel_vm_end);
 		if (pmap_load(l1) == 0) {
 			/* We need a new PDP entry */
-			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
-			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+			nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 			if (nkpg == NULL)
 				panic("pmap_growkernel: no memory to grow kernel");
-			if ((nkpg->flags & PG_ZERO) == 0)
-				pmap_zero_page(nkpg);
+			nkpg->pindex = kernel_vm_end >> L1_SHIFT;
 			/* See the dmb() in _pmap_alloc_l3(). */
 			dmb(ishst);
 			paddr = VM_PAGE_TO_PHYS(nkpg);
@@ -2213,13 +2204,11 @@
 			continue;
 		}
 
-		nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			pmap_zero_page(nkpg);
+		nkpg->pindex = kernel_vm_end >> L2_SHIFT;
 		/* See the dmb() in _pmap_alloc_l3(). */
 		dmb(ishst);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
@@ -2564,8 +2553,7 @@
 		}
 	}
 	/* No free items, allocate another chunk */
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED);
+	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 	if (m == NULL) {
 		if (lockp == NULL) {
 			PV_STAT(pc_chunk_tryfail++);
@@ -2630,8 +2618,7 @@
 			break;
 	}
 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
-		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_WIRED);
+		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
@@ -6068,8 +6055,8 @@
 		return (NULL);
 	}
 
-	if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+	if ((ml2 = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED)) ==
+	    NULL) {
 		CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
 		    " in pmap %p", va, pmap);
 		l2 = NULL;
@@ -6200,9 +6187,9 @@
 		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
 		 * priority is normal.
 		 */
-		ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
-		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
-		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+		ml3 = vm_page_alloc_noobj(
+		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
+		    VM_ALLOC_WIRED);
 
 		/*
 		 * If the allocation of the new page table page fails,
@@ -6214,6 +6201,7 @@
 			    " in pmap %p", va, pmap);
 			goto fail;
 		}
+		ml3->pindex = pmap_l2_pindex(va);
 
 		if (!ADDR_IS_KERNEL(va)) {
 			ml3->ref_count = NL3PG;
Index: sys/arm64/arm64/uma_machdep.c
===================================================================
--- sys/arm64/arm64/uma_machdep.c
+++ sys/arm64/arm64/uma_machdep.c
@@ -47,16 +47,14 @@
 	void *va;
 
 	*flags = UMA_SLAB_PRIV;
-	m = vm_page_alloc_domain(NULL, 0, domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+	    VM_ALLOC_WIRED);
 	if (m == NULL)
 		return (NULL);
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
 	va = (void *)PHYS_TO_DMAP(pa);
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		pagezero(va);
 	return (va);
 }
 
Index: sys/arm64/iommu/iommu_pmap.c
===================================================================
--- sys/arm64/iommu/iommu_pmap.c
+++ sys/arm64/iommu/iommu_pmap.c
@@ -387,16 +387,11 @@
 	/*
 	 * allocate the l0 page
 	 */
-	while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
-		vm_wait(NULL);
-
+	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
+	    VM_ALLOC_ZERO);
 	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
 
-	if ((m->flags & PG_ZERO) == 0)
-		pagezero(pmap->pm_l0);
-
 	pmap->pm_root.rt_root = 0;
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 
@@ -446,16 +441,14 @@
 	/*
 	 * Allocate a page table page.
 	 */
-	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		/*
 		 * Indicate the need to retry.  While waiting, the page table
 		 * page may have been allocated.
 		 */
 		return (NULL);
 	}
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
+	m->pindex = ptepindex;
 
 	/*
 	 * Because of AArch64's weak memory consistency model, we must have a
Index: sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_page.c
+++ sys/compat/linuxkpi/common/src/linux_page.c
@@ -97,7 +97,7 @@
 	if ((flags & M_ZERO) != 0)
 		req |= VM_ALLOC_ZERO;
 	if (order == 0 && (flags & GFP_DMA32) == 0) {
-		page = vm_page_alloc(NULL, 0, req);
+		page = vm_page_alloc_noobj(req);
 		if (page == NULL)
 			return (NULL);
 	} else {
Index: sys/dev/drm2/ttm/ttm_page_alloc.c
===================================================================
--- sys/dev/drm2/ttm/ttm_page_alloc.c
+++ sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -178,12 +178,7 @@
 {
 	vm_page_t p;
 
-	while (1) {
-		p = vm_page_alloc(NULL, 0, req);
-		if (p != NULL)
-			break;
-		vm_wait(NULL);
-	}
+	p = vm_page_alloc_noobj(req | VM_ALLOC_WAITOK);
 	pmap_page_set_memattr(p, memattr);
 	return (p);
 }
Index: sys/dev/ti/if_ti.c
===================================================================
--- sys/dev/ti/if_ti.c
+++ sys/dev/ti/if_ti.c
@@ -1609,8 +1609,7 @@
 			    "failed -- packet dropped!\n");
 			goto nobufs;
 		}
-		frame = vm_page_alloc(NULL, 0,
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+		frame = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
 		    VM_ALLOC_WIRED);
 		if (frame == NULL) {
 			device_printf(sc->ti_dev, "buffer allocation "
Index: sys/dev/virtio/balloon/virtio_balloon.c
===================================================================
--- sys/dev/virtio/balloon/virtio_balloon.c
+++ sys/dev/virtio/balloon/virtio_balloon.c
@@ -460,8 +460,7 @@
 {
 	vm_page_t m;
 
-	m = vm_page_alloc(NULL, 0,
-	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP);
+	m = vm_page_alloc_noobj(VM_ALLOC_NODUMP);
 	if (m != NULL)
 		sc->vtballoon_current_npages++;
 
Index: sys/dev/xen/balloon/balloon.c
===================================================================
--- sys/dev/xen/balloon/balloon.c
+++ sys/dev/xen/balloon/balloon.c
@@ -228,25 +228,18 @@
 	nr_pages = nitems(frame_list);
 
 	for (i = 0; i < nr_pages; i++) {
-		if ((page = vm_page_alloc(NULL, 0,
-		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_ZERO)) == NULL) {
+		/*
+		 * Zero the page, or else we might be leaking important data to
+		 * other domains on the same host. Xen doesn't scrub ballooned
+		 * out memory pages, the guest is in charge of making sure that
+		 * no information is leaked.
+		 */
+		if ((page = vm_page_alloc_noobj(VM_ALLOC_ZERO)) == NULL) {
 			nr_pages = i;
 			need_sleep = 1;
 			break;
 		}
 
-		if ((page->flags & PG_ZERO) == 0) {
-			/*
-			 * Zero the page, or else we might be leaking
-			 * important data to other domains on the same
-			 * host. Xen doesn't scrub ballooned out memory
-			 * pages, the guest is in charge of making
-			 * sure that no information is leaked.
-			 */
-			pmap_zero_page(page);
-		}
-
 		frame_list[i] = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
 
 		TAILQ_INSERT_HEAD(&ballooned_pages, page, plinks.q);
Index: sys/dev/xen/gntdev/gntdev.c
===================================================================
--- sys/dev/xen/gntdev/gntdev.c
+++ sys/dev/xen/gntdev/gntdev.c
@@ -368,20 +368,13 @@
 		grefs[i].file_index = file_offset + i * PAGE_SIZE;
 		grefs[i].gref_id = GRANT_REF_INVALID;
 		grefs[i].notify = NULL;
-		grefs[i].page = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL
-		    | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+		grefs[i].page = vm_page_alloc_noobj(VM_ALLOC_WIRED |
+		    VM_ALLOC_ZERO);
 		if (grefs[i].page == NULL) {
 			log(LOG_ERR, "Page allocation failed.");
 			error = ENOMEM;
 			break;
 		}
-		if ((grefs[i].page->flags & PG_ZERO) == 0) {
-			/*
-			 * Zero the allocated page, as we don't want to
-			 * leak our memory to other domains.
-			 */
-			pmap_zero_page(grefs[i].page);
-		}
 		grefs[i].page->valid = VM_PAGE_BITS_ALL;
 
 		error = gnttab_grant_foreign_access(arg->domid,
Index: sys/fs/nfs/nfs_commonsubs.c
===================================================================
--- sys/fs/nfs/nfs_commonsubs.c
+++ sys/fs/nfs/nfs_commonsubs.c
@@ -4913,13 +4913,8 @@
 		*bextpg = 0;
 		m->m_next = mp;
 	} else {
-		do {
-			pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-			    VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
-			    VM_ALLOC_WIRED);
-			if (pg == NULL)
-				vm_wait(NULL);
-		} while (pg == NULL);
+		pg = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_NODUMP |
+		    VM_ALLOC_WIRED);
 		m->m_epg_pa[m->m_epg_npgs] = VM_PAGE_TO_PHYS(pg);
 		*bextpg = m->m_epg_npgs;
 		m->m_epg_npgs++;
Index: sys/fs/nfsclient/nfs_clrpcops.c
===================================================================
--- sys/fs/nfsclient/nfs_clrpcops.c
+++ sys/fs/nfsclient/nfs_clrpcops.c
@@ -8915,13 +8915,8 @@
 		 * page.
 		 */
 		if (left < plen) {
-			do {
-				pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-				    VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
-				    VM_ALLOC_WIRED);
-				if (pg == NULL)
-					vm_wait(NULL);
-			} while (pg == NULL);
+			pg = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_NODUMP |
+			    VM_ALLOC_WIRED);
 			m2->m_epg_pa[0] = VM_PAGE_TO_PHYS(pg);
 			m2->m_epg_npgs = 1;
 
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -2057,7 +2057,6 @@
 static int
 __CONCAT(PMTYPE, pinit)(pmap_t pmap)
 {
-	vm_page_t m;
 	int i;
 
 	/*
@@ -2085,11 +2084,10 @@
 	 * allocate the page directory page(s)
 	 */
 	for (i = 0; i < NPGPTD; i++) {
-		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
-		pmap->pm_ptdpg[i] = m;
+		pmap->pm_ptdpg[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED |
+		    VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
 #ifdef PMAP_PAE_COMP
-		pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(m) | PG_V;
+		pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(pmap->pm_ptdpg[i]) | PG_V;
 #endif
 	}
 
@@ -2103,10 +2101,6 @@
 	}
 #endif
 
-	for (i = 0; i < NPGPTD; i++)
-		if ((pmap->pm_ptdpg[i]->flags & PG_ZERO) == 0)
-			pagezero(pmap->pm_pdir + (i * NPDEPG));
-
 	/* Install the trampoline mapping. */
 	pmap->pm_pdir[TRPTDI] = PTD[TRPTDI];
 
@@ -2130,8 +2124,7 @@
 	/*
 	 * Allocate a page table page.
 	 */
-	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 			PMAP_UNLOCK(pmap);
 			rw_wunlock(&pvh_global_lock);
@@ -2146,8 +2139,7 @@
 		 */
 		return (NULL);
 	}
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
+	m->pindex = ptepindex;
 
 	/*
 	 * Map the pagetable page into the process address space, if
@@ -2271,16 +2263,13 @@
 			continue;
 		}
 
-		nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-
+		nkpg->pindex = kernel_vm_end >> PDRSHIFT;
 		nkpt++;
 
-		if ((nkpg->flags & PG_ZERO) == 0)
-			pmap_zero_page(nkpg);
 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 		pdir_pde(KPTD, kernel_vm_end) = newpdir;
@@ -2575,8 +2564,8 @@
 	 * global lock.  If "pv_vafree" is currently non-empty, it will
 	 * remain non-empty until pmap_ptelist_alloc() completes.
 	 */
-	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+	if (pv_vafree == 0 ||
+	    (m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
 		if (try) {
 			pv_entry_count--;
 			PV_STAT(pc_chunk_tryfail++);
@@ -2808,9 +2797,8 @@
 		 * "failure" if the mapping was never accessed or the
 		 * allocation of the new page table page fails.
 		 */
-		if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
-		    va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
-		    VM_ALLOC_WIRED)) == NULL) {
+		if ((oldpde & PG_A) == 0 ||
+		    (mpte = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
 			SLIST_INIT(&free);
 			sva = trunc_4mpage(va);
 			pmap_remove_pde(pmap, pde, sva, &free);
@@ -2821,6 +2809,7 @@
 			    " in pmap %p", va, pmap);
 			return (FALSE);
 		}
+		mpte->pindex = va >> PDRSHIFT;
 		if (pmap != kernel_pmap) {
 			mpte->ref_count = NPTEPG;
 			pmap->pm_stats.resident_count++;
@@ -5914,8 +5903,7 @@
 	prev_addr += trm_guard;
 	trm_pte = PTmap + atop(prev_addr);
 	for (af = prev_addr; af < addr; af += PAGE_SIZE) {
-		m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
-		    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
+		m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
 		pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
 		    PG_M | PG_A | PG_RW | PG_V | pgeflag |
 		    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE));
@@ -5934,10 +5922,8 @@
 	trm_guard = 0;
 	pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK);
 	vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
-	pd_m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
-	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO);
-	if ((pd_m->flags & PG_ZERO) == 0)
-		pmap_zero_page(pd_m);
+	pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK |
+	    VM_ALLOC_ZERO);
 	PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
 	    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE);
 }
Index: sys/kern/kern_mbuf.c
===================================================================
--- sys/kern/kern_mbuf.c
+++ sys/kern/kern_mbuf.c
@@ -1624,8 +1624,8 @@
 	npgs = howmany(len, PAGE_SIZE);
 	for (i = 0; i < npgs; i++) {
 		do {
-			pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-			    VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP | VM_ALLOC_WIRED);
+			pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
+			    VM_ALLOC_WIRED);
 			if (pg == NULL) {
 				if (how == M_NOWAIT) {
 					m->m_epg_npgs = i;
Index: sys/kern/uipc_ktls.c
===================================================================
--- sys/kern/uipc_ktls.c
+++ sys/kern/uipc_ktls.c
@@ -2054,12 +2054,8 @@
 	} else {
 		off = m->m_epg_1st_off;
 		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
-			do {
-				pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-				    VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
-				    VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL);
-			} while (pg == NULL);
-
+			pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
+			    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
 			len = m_epg_pagelen(m, i, off);
 			state->parray[i] = VM_PAGE_TO_PHYS(pg);
 			state->dst_iov[i].iov_base =
Index: sys/kern/uipc_mbuf.c
===================================================================
--- sys/kern/uipc_mbuf.c
+++ sys/kern/uipc_mbuf.c
@@ -1767,8 +1767,7 @@
 	vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
 	int error, length, i, needed;
 	ssize_t total;
-	int pflags = malloc2vm_flags(how) | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
-	    VM_ALLOC_WIRED;
+	int pflags = malloc2vm_flags(how) | VM_ALLOC_NODUMP | VM_ALLOC_WIRED;
 
 	MPASS((flags & M_PKTHDR) == 0);
 	MPASS((how & M_ZERO) == 0);
@@ -1816,7 +1815,7 @@
 		needed = length = MIN(maxseg, total);
 		for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
 retry_page:
-			pg_array[i] = vm_page_alloc(NULL, 0, pflags);
+			pg_array[i] = vm_page_alloc_noobj(pflags);
 			if (pg_array[i] == NULL) {
 				if (how & M_NOWAIT) {
 					goto failed;
Index: sys/kern/vfs_bio.c
===================================================================
--- sys/kern/vfs_bio.c
+++ sys/kern/vfs_bio.c
@@ -4926,9 +4926,8 @@
 		 * could interfere with paging I/O, no matter which
 		 * process we are.
 		 */
-		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) |
-		    VM_ALLOC_WAITOK);
+		p = vm_page_alloc_noobj(VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
+		    VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) | VM_ALLOC_WAITOK);
 		pmap_qenter(pg, &p, 1);
 		bp->b_pages[index] = p;
 	}
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c
+++ sys/mips/mips/pmap.c
@@ -1115,10 +1115,6 @@
 	    VM_ALLOC_ZERO);
 	if (m == NULL)
 		return (NULL);
-
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
-
 	m->pindex = index;
 	return (m);
 }
Index: sys/mips/mips/uma_machdep.c
===================================================================
--- sys/mips/mips/uma_machdep.c
+++ sys/mips/mips/uma_machdep.c
@@ -75,8 +75,6 @@
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
 	va = (void *)MIPS_PHYS_TO_DIRECT(pa);
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero(va, PAGE_SIZE);
 	return (va);
 }
 
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -1915,8 +1915,8 @@
 	*flags = UMA_SLAB_PRIV;
 	needed_lock = !PMAP_LOCKED(kernel_pmap);
 
-	m = vm_page_alloc_domain(NULL, 0, domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+	    VM_ALLOC_WIRED);
 	if (m == NULL)
 		return (NULL);
 
@@ -1938,9 +1938,6 @@
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
 
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero((void *)va, PAGE_SIZE);
-
 	return (void *)va;
 }
 
Index: sys/powerpc/aim/mmu_radix.c
===================================================================
--- sys/powerpc/aim/mmu_radix.c
+++ sys/powerpc/aim/mmu_radix.c
@@ -1214,8 +1214,7 @@
 			break;
 	}
 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
-		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_WIRED);
+		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
@@ -1637,8 +1636,7 @@
 		}
 	}
 	/* No free items, allocate another chunk */
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED);
+	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 	if (m == NULL) {
 		if (lockp == NULL) {
 			PV_STAT(pc_chunk_tryfail++);
@@ -3535,13 +3533,11 @@
 		l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
 		if ((be64toh(*l2e) & PG_V) == 0) {
 			/* We need a new PDP entry */
-			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT,
-			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+			nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 			if (nkpg == NULL)
 				panic("pmap_growkernel: no memory to grow kernel");
-			if ((nkpg->flags & PG_ZERO) == 0)
-				mmu_radix_zero_page(nkpg);
+			nkpg->pindex = kernel_vm_end >> L2_PAGE_SIZE_SHIFT;
 			paddr = VM_PAGE_TO_PHYS(nkpg);
 			pde_store(l2e, paddr);
 			continue; /* try again */
@@ -3556,13 +3552,11 @@
 			continue;
 		}
 
-		nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end),
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			mmu_radix_zero_page(nkpg);
+		nkpg->pindex = pmap_l3e_pindex(kernel_vm_end);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		pde_store(l3e, paddr);
 
@@ -4249,8 +4243,7 @@
 	/*
 	 * Allocate a page table page.
 	 */
-	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		if (lockp != NULL) {
 			RELEASE_PV_LIST_LOCK(lockp);
 			PMAP_UNLOCK(pmap);
@@ -4263,8 +4256,7 @@
 		 */
 		return (NULL);
 	}
-	if ((m->flags & PG_ZERO) == 0)
-		mmu_radix_zero_page(m);
+	m->pindex = ptepindex;
 
 	/*
 	 * Map the pagetable page into the process address space, if
@@ -4921,10 +4913,9 @@
 	 * is the only part of the kernel address space that must be
 	 * handled here.
	 */
-	if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
-	    pmap_l3e_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
-	    DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+	if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc_noobj(
+	    (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS ?
+	    VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED)) == NULL) {
 		SLIST_INIT(&free);
 		sva = trunc_2mpage(va);
 		pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
@@ -4934,6 +4925,7 @@
 		    " in pmap %p", va, pmap);
 		return (FALSE);
 	}
+	mpte->pindex = pmap_l3e_pindex(va);
 	if (va < VM_MAXUSER_ADDRESS)
 		pmap_resident_count_inc(pmap, 1);
 }
@@ -5953,13 +5945,13 @@
 	oldpdpe = be64toh(*l2e);
 	KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
 	    ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
-	pdpg = vm_page_alloc(NULL, va >> L2_PAGE_SIZE_SHIFT,
-	    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+	pdpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
 	if (pdpg == NULL) {
 		CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
 		    " in pmap %p", va, pmap);
 		return (FALSE);
 	}
+	pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT;
 	pdpgpa = VM_PAGE_TO_PHYS(pdpg);
 	firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
 	KASSERT((oldpdpe & PG_A) != 0,
Index: sys/powerpc/booke/pmap_32.c
===================================================================
--- sys/powerpc/booke/pmap_32.c
+++ sys/powerpc/booke/pmap_32.c
@@ -264,8 +264,7 @@
 	for (i = 0; i < PTBL_PAGES; i++) {
 		pidx = (PTBL_PAGES * pdir_idx) + i;
-		while ((m = vm_page_alloc(NULL, pidx,
-		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+		while ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
 			if (nosleep) {
 				ptbl_free_pmap_ptbl(pmap, ptbl);
 				for (j = 0; j < i; j++)
@@ -279,6 +278,7 @@
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
+		m->pindex = pidx;
 		mtbl[i] = m;
 	}
 
Index: sys/powerpc/booke/pmap_64.c
===================================================================
--- sys/powerpc/booke/pmap_64.c
+++ sys/powerpc/booke/pmap_64.c
@@ -157,8 +157,8 @@
 	vm_page_t m;
 	int req;
 
-	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
-	while ((m = vm_page_alloc(NULL, idx, req)) == NULL) {
+	req = VM_ALLOC_WIRED | VM_ALLOC_ZERO;
+	while ((m = vm_page_alloc_noobj(req)) == NULL) {
 		if (nosleep)
 			return (0);
 
@@ -168,10 +168,7 @@
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pmap);
 	}
-
-	if (!(m->flags & PG_ZERO))
-		/* Zero whole ptbl. */
-		mmu_booke_zero_page(m);
+	m->pindex = idx;
 
 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
 }
Index: sys/powerpc/powerpc/uma_machdep.c
===================================================================
--- sys/powerpc/powerpc/uma_machdep.c
+++ sys/powerpc/powerpc/uma_machdep.c
@@ -55,8 +55,8 @@
 
 	*flags = UMA_SLAB_PRIV;
 
-	m = vm_page_alloc_domain(NULL, 0, domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+	    VM_ALLOC_WIRED);
 	if (m == NULL)
 		return (NULL);
 
@@ -72,9 +72,6 @@
 	} else {
 		va = (void *)(vm_offset_t)PHYS_TO_DMAP(pa);
 	}
-
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero(va, PAGE_SIZE);
 
 	atomic_add_int(&hw_uma_mdpages, 1);
 
 	return (va);
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -1210,17 +1210,13 @@
 	/*
 	 * allocate the l1 page
 	 */
-	while ((l1pt = vm_page_alloc(NULL, 0xdeadbeef, VM_ALLOC_NORMAL |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
-		vm_wait(NULL);
+	l1pt = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
+	    VM_ALLOC_WAITOK);
 
 	l1phys = VM_PAGE_TO_PHYS(l1pt);
 	pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
 	pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);
 
-	if ((l1pt->flags & PG_ZERO) == 0)
-		pagezero(pmap->pm_l1);
-
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 
 	CPU_ZERO(&pmap->pm_active);
@@ -1262,8 +1258,8 @@
 	/*
 	 * Allocate a page table page.
 	 */
-	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+	if (m == NULL) {
 		if (lockp != NULL) {
 			RELEASE_PV_LIST_LOCK(lockp);
 			PMAP_UNLOCK(pmap);
@@ -1279,9 +1275,7 @@
 		 */
 		return (NULL);
 	}
-
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
+	m->pindex = ptepindex;
 
 	/*
 	 * Map the pagetable page into the process address space, if
@@ -1475,13 +1469,11 @@
 		l1 = pmap_l1(kernel_pmap, kernel_vm_end);
 		if (pmap_load(l1) == 0) {
 			/* We need a new PDP entry */
-			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
-			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+			nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 			if (nkpg == NULL)
 				panic("pmap_growkernel: no memory to grow kernel");
-			if ((nkpg->flags & PG_ZERO) == 0)
-				pmap_zero_page(nkpg);
+			nkpg->pindex = kernel_vm_end >> L1_SHIFT;
 
 			paddr = VM_PAGE_TO_PHYS(nkpg);
 			pn = (paddr / PAGE_SIZE);
@@ -1503,14 +1495,11 @@
 			continue;
 		}
 
-		nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0) {
-			pmap_zero_page(nkpg);
-		}
+		nkpg->pindex = kernel_vm_end >> L2_SHIFT;
 
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		pn = (paddr / PAGE_SIZE);
@@ -1690,8 +1679,7 @@
 		}
 	}
 	/* No free items, allocate another chunk */
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED);
+	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 	if (m == NULL) {
 		if (lockp == NULL) {
 			PV_STAT(pc_chunk_tryfail++);
@@ -1757,8 +1745,7 @@
 			break;
 	}
 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
-		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_WIRED);
+		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
@@ -2477,10 +2464,9 @@
 	    ("pmap_demote_l2_locked: oldl2 is not a leaf entry"));
 	if ((oldl2 & PTE_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
 	    NULL) {
-		if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc(NULL,
-		    pmap_l2_pindex(va), (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT :
-		    VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) ==
-		    NULL) {
+		if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc_noobj(
+		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
+		    VM_ALLOC_WIRED)) == NULL) {
 			SLIST_INIT(&free);
 			(void)pmap_remove_l2(pmap, l2, va & ~L2_OFFSET,
 			    pmap_load(pmap_l1(pmap, va)), &free, lockp);
@@ -2489,6 +2475,7 @@
 			    "failure for va %#lx in pmap %p", va, pmap);
 			return (false);
 		}
+		mpte->pindex = pmap_l2_pindex(va);
 		if (va < VM_MAXUSER_ADDRESS) {
 			mpte->ref_count = Ln_ENTRIES;
 			pmap_resident_count_inc(pmap, 1);
@@ -2740,13 +2727,10 @@
 	/* TODO: This is not optimal, but should mostly work */
 	if (l3 == NULL) {
 		if (l2 == NULL) {
-			l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-			    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+			l2_m = vm_page_alloc_noobj(VM_ALLOC_WIRED |
 			    VM_ALLOC_ZERO);
 			if (l2_m == NULL)
 				panic("pmap_enter: l2 pte_m == NULL");
-			if ((l2_m->flags & PG_ZERO) == 0)
-				pmap_zero_page(l2_m);
 
 			l2_pa = VM_PAGE_TO_PHYS(l2_m);
 			l2_pn = (l2_pa / PAGE_SIZE);
@@ -2759,8 +2743,8 @@
 			l2 = pmap_l1_to_l2(l1, va);
 		}
 
-		l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+		l3_m = vm_page_alloc_noobj(VM_ALLOC_WIRED |
+		    VM_ALLOC_ZERO);
 		if (l3_m == NULL)
 			panic("pmap_enter: l3 pte_m == NULL");
 		if ((l3_m->flags & PG_ZERO) == 0)
Index: sys/riscv/riscv/uma_machdep.c
===================================================================
--- sys/riscv/riscv/uma_machdep.c
+++ sys/riscv/riscv/uma_machdep.c
@@ -46,16 +46,14 @@
 	void *va;
 
 	*flags = UMA_SLAB_PRIV;
-	m = vm_page_alloc_domain(NULL, 0, domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+	    VM_ALLOC_WIRED);
 	if (m == NULL)
 		return (NULL);
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
 	va = (void *)PHYS_TO_DMAP(pa);
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero(va, PAGE_SIZE);
 	return (va);
 }
 
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -1979,24 +1979,23 @@
 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
 
 	TAILQ_INIT(&alloctail);
-	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
-	    malloc2vm_flags(wait);
+	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | malloc2vm_flags(wait);
 	*pflag = UMA_SLAB_KERNEL;
 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		if (CPU_ABSENT(cpu)) {
-			p = vm_page_alloc(NULL, 0, flags);
+			p = vm_page_alloc_noobj(flags);
 		} else {
 #ifndef NUMA
-			p = vm_page_alloc(NULL, 0, flags);
+			p = vm_page_alloc_noobj(flags);
 #else
 			pc = pcpu_find(cpu);
 			if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
 				p = NULL;
 			else
-				p = vm_page_alloc_domain(NULL, 0,
-				    pc->pc_domain, flags);
+				p = vm_page_alloc_noobj_domain(pc->pc_domain,
+				    flags);
 			if (__predict_false(p == NULL))
-				p = vm_page_alloc(NULL, 0, flags);
+				p = vm_page_alloc_noobj(flags);
 #endif
 		}
 		if (__predict_false(p == NULL))
@@ -2039,16 +2038,17 @@
 	vm_offset_t retkva, zkva;
 	vm_page_t p, p_next;
 	uma_keg_t keg;
+	int req;
 
 	TAILQ_INIT(&alloctail);
 	keg = zone->uz_keg;
+	req = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
+	if ((wait & M_WAITOK) != 0)
+		req |= VM_ALLOC_WAITOK;
 
 	npages = howmany(bytes, PAGE_SIZE);
 	while (npages > 0) {
-		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
-		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
-		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
-		    VM_ALLOC_NOWAIT));
+		p = vm_page_alloc_noobj_domain(domain, req);
 		if (p != NULL) {
 			/*
 			 * Since the page does not belong to an object, its
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -698,10 +698,7 @@
 	 * zeros, while not using much more physical resources.
 	 */
 	addr = kva_alloc(ZERO_REGION_SIZE);
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
+	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
 		pmap_qenter(addr + i, &m, 1);
 	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -197,8 +197,7 @@
 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 
-	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
+	bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 }
 
 /*
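
The conversion applied above is mechanical.  As an illustrative sketch only
(not part of the patch; the function name below is hypothetical), the idiom
the patch converts to looks like this in kernel code that includes
<vm/vm_page.h>:

/*
 * Sketch of the vm_page_alloc_noobj() idiom: the returned page is not
 * associated with a VM object, is zeroed whenever VM_ALLOC_ZERO is passed
 * (so no PG_ZERO check is needed), and its pindex is left for the caller
 * to set if anything later depends on it.
 */
static vm_page_t
alloc_ptp_sketch(vm_pindex_t ptepindex)
{
	vm_page_t m;

	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);		/* caller decides whether to retry */
	m->pindex = ptepindex;		/* was an argument to vm_page_alloc() */
	return (m);
}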