Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -2026,39 +2026,27 @@
 	m_code = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
 	    VM_ALLOC_ZERO | VM_ALLOC_NOOBJ, 1, 0, (1ULL << 32), PAGE_SIZE, 0,
 	    VM_MEMATTR_DEFAULT);
-	if ((m_code->flags & PG_ZERO) == 0)
-		pmap_zero_page(m_code);
 	v_code = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_code));
 	m_pml5 = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
 	    VM_ALLOC_ZERO | VM_ALLOC_NOOBJ, 1, 0, (1ULL << 32), PAGE_SIZE, 0,
 	    VM_MEMATTR_DEFAULT);
-	if ((m_pml5->flags & PG_ZERO) == 0)
-		pmap_zero_page(m_pml5);
 	KPML5phys = VM_PAGE_TO_PHYS(m_pml5);
 	v_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(KPML5phys);
 	m_pml4 = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
 	    VM_ALLOC_ZERO | VM_ALLOC_NOOBJ, 1, 0, (1ULL << 32), PAGE_SIZE, 0,
 	    VM_MEMATTR_DEFAULT);
-	if ((m_pml4->flags & PG_ZERO) == 0)
-		pmap_zero_page(m_pml4);
 	v_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
 	m_pdp = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
 	    VM_ALLOC_ZERO | VM_ALLOC_NOOBJ, 1, 0, (1ULL << 32), PAGE_SIZE, 0,
 	    VM_MEMATTR_DEFAULT);
-	if ((m_pdp->flags & PG_ZERO) == 0)
-		pmap_zero_page(m_pdp);
 	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
 	m_pd = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
 	    VM_ALLOC_ZERO | VM_ALLOC_NOOBJ, 1, 0, (1ULL << 32), PAGE_SIZE, 0,
 	    VM_MEMATTR_DEFAULT);
-	if ((m_pd->flags & PG_ZERO) == 0)
-		pmap_zero_page(m_pd);
 	v_pd = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd));
 	m_pt = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
 	    VM_ALLOC_ZERO | VM_ALLOC_NOOBJ, 1, 0, (1ULL << 32), PAGE_SIZE, 0,
 	    VM_MEMATTR_DEFAULT);
-	if ((m_pt->flags & PG_ZERO) == 0)
-		pmap_zero_page(m_pt);
 	v_pt = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pt));
 
 	/*
@@ -4208,8 +4196,6 @@
 	pmap->pm_pmltopu = NULL;
 
 	pmap->pm_type = pm_type;
-	if ((pmltop_pg->flags & PG_ZERO) == 0)
-		pagezero(pmap->pm_pmltop);
 
 	/*
 	 * Do not install the host kernel mappings in the nested page
@@ -4414,8 +4400,6 @@
 	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
 		return (NULL);
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
 
 	/*
 	 * Map the pagetable page into the process address space, if
@@ -4792,8 +4776,6 @@
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			pmap_zero_page(nkpg);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		*pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
 		    X86_PG_A | X86_PG_M);
@@ -4814,8 +4796,6 @@
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			pmap_zero_page(nkpg);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
 		pde_store(pde, newpdir);
@@ -10084,13 +10064,8 @@
 static vm_page_t
 pmap_large_map_getptp_unlocked(void)
 {
-	vm_page_t m;
-
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_ZERO);
-	if (m != NULL && (m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
-	return (m);
+	return (vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+	    VM_ALLOC_ZERO));
 }
 
 static vm_page_t
Index: sys/amd64/amd64/uma_machdep.c
===================================================================
--- sys/amd64/amd64/uma_machdep.c
+++ sys/amd64/amd64/uma_machdep.c
@@ -45,7 +45,6 @@
 {
 	vm_page_t m;
 	vm_paddr_t pa;
-	void *va;
 
 	*flags = UMA_SLAB_PRIV;
 	m = vm_page_alloc_domain(NULL, 0, domain,
@@ -55,10 +54,7 @@
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
-	va = (void *)PHYS_TO_DMAP(pa);
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		pagezero(va);
-	return (va);
+	return ((void *)PHYS_TO_DMAP(pa));
 }
 
 void
Index: sys/arm/nvidia/drm2/tegra_bo.c
===================================================================
--- sys/arm/nvidia/drm2/tegra_bo.c
+++ sys/arm/nvidia/drm2/tegra_bo.c
@@ -121,8 +121,6 @@
 	}
 
 	for (i = 0; i < npages; i++, m++) {
-		if ((m->flags & PG_ZERO) == 0)
-			pmap_zero_page(m);
 		m->valid = VM_PAGE_BITS_ALL;
 		(*ret_page)[i] = m;
 	}
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1785,9 +1785,6 @@
 	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
 
-	if ((m->flags & PG_ZERO) == 0)
-		pagezero(pmap->pm_l0);
-
 	pmap->pm_root.rt_root = 0;
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 	pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
@@ -1868,8 +1865,6 @@
 		 */
 		return (NULL);
 	}
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
 
 	/*
 	 * Because of AArch64's weak memory consistency model, we must have a
@@ -2187,8 +2182,6 @@
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			pmap_zero_page(nkpg);
 		/* See the dmb() in _pmap_alloc_l3(). */
 		dmb(ishst);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
@@ -2210,8 +2203,6 @@
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			pmap_zero_page(nkpg);
 		/* See the dmb() in _pmap_alloc_l3(). */
 		dmb(ishst);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
Index: sys/arm64/arm64/uma_machdep.c
===================================================================
--- sys/arm64/arm64/uma_machdep.c
+++ sys/arm64/arm64/uma_machdep.c
@@ -43,7 +43,6 @@
 {
 	vm_page_t m;
 	vm_paddr_t pa;
-	void *va;
 
 	*flags = UMA_SLAB_PRIV;
 	m = vm_page_alloc_domain(NULL, 0, domain,
@@ -53,10 +52,7 @@
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
-	va = (void *)PHYS_TO_DMAP(pa);
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		pagezero(va);
-	return (va);
+	return ((void *)PHYS_TO_DMAP(pa));
 }
 
 void
Index: sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_page.c
+++ sys/compat/linuxkpi/common/src/linux_page.c
@@ -119,16 +119,6 @@
 				return (NULL);
 			}
 		}
-		if (flags & M_ZERO) {
-			unsigned long x;
-
-			for (x = 0; x != npages; x++) {
-				vm_page_t pgo = page + x;
-
-				if ((pgo->flags & PG_ZERO) == 0)
-					pmap_zero_page(pgo);
-			}
-		}
 	} else {
 		vm_offset_t vaddr;
 
Index: sys/dev/xen/balloon/balloon.c
===================================================================
--- sys/dev/xen/balloon/balloon.c
+++ sys/dev/xen/balloon/balloon.c
@@ -236,17 +236,6 @@
 			break;
 		}
 
-		if ((page->flags & PG_ZERO) == 0) {
-			/*
-			 * Zero the page, or else we might be leaking
-			 * important data to other domains on the same
-			 * host. Xen doesn't scrub ballooned out memory
-			 * pages, the guest is in charge of making
-			 * sure that no information is leaked.
-			 */
-			pmap_zero_page(page);
-		}
-
 		frame_list[i] = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
 
 		TAILQ_INSERT_HEAD(&ballooned_pages, page, plinks.q);
Index: sys/dev/xen/gntdev/gntdev.c
===================================================================
--- sys/dev/xen/gntdev/gntdev.c
+++ sys/dev/xen/gntdev/gntdev.c
@@ -375,13 +375,6 @@
 			error = ENOMEM;
 			break;
 		}
-		if ((grefs[i].page->flags & PG_ZERO) == 0) {
-			/*
-			 * Zero the allocated page, as we don't want to
-			 * leak our memory to other domains.
-			 */
-			pmap_zero_page(grefs[i].page);
-		}
 		grefs[i].page->valid = VM_PAGE_BITS_ALL;
 
 		error = gnttab_grant_foreign_access(arg->domid,
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -2103,10 +2103,6 @@
 	}
 #endif
 
-	for (i = 0; i < NPGPTD; i++)
-		if ((pmap->pm_ptdpg[i]->flags & PG_ZERO) == 0)
-			pagezero(pmap->pm_pdir + (i * NPDEPG));
-
 	/* Install the trampoline mapping. */
 	pmap->pm_pdir[TRPTDI] = PTD[TRPTDI];
 
@@ -2146,8 +2142,6 @@
 		 */
 		return (NULL);
 	}
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
 
 	/*
 	 * Map the pagetable page into the process address space, if
@@ -2276,8 +2270,6 @@
 		nkpt++;
 
-		if ((nkpg->flags & PG_ZERO) == 0)
-			pmap_zero_page(nkpg);
 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 		pdir_pde(KPTD, kernel_vm_end) = newpdir;
 
@@ -5933,8 +5925,6 @@
 	vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
 	pd_m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO);
-	if ((pd_m->flags & PG_ZERO) == 0)
-		pmap_zero_page(pd_m);
 	PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
 	    pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE);
 }
Index: sys/kern/uipc_shm.c
===================================================================
--- sys/kern/uipc_shm.c
+++ sys/kern/uipc_shm.c
@@ -807,8 +807,6 @@
 		}
 		try = 0;
 		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
-			if ((m[i].flags & PG_ZERO) == 0)
-				pmap_zero_page(&m[i]);
 			vm_page_valid(&m[i]);
 			vm_page_xunbusy(&m[i]);
 		}
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c
+++ sys/mips/mips/pmap.c
@@ -1115,10 +1115,6 @@
 	    VM_ALLOC_ZERO);
 	if (m == NULL)
 		return (NULL);
-
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
-
 	m->pindex = index;
 	return (m);
 }
Index: sys/mips/mips/uma_machdep.c
===================================================================
--- sys/mips/mips/uma_machdep.c
+++ sys/mips/mips/uma_machdep.c
@@ -46,7 +46,6 @@
 	vm_paddr_t pa;
 	vm_page_t m;
 	int pflags;
-	void *va;
 
 	*flags = UMA_SLAB_PRIV;
 	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
@@ -73,10 +72,7 @@
 	pa = VM_PAGE_TO_PHYS(m);
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
-	va = (void *)MIPS_PHYS_TO_DIRECT(pa);
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero(va, PAGE_SIZE);
-	return (va);
+	return ((void *)MIPS_PHYS_TO_DIRECT(pa));
 }
 
 void
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -1943,10 +1943,7 @@
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
 
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero((void *)va, PAGE_SIZE);
-
-	return (void *)va;
+	return ((void *)va);
 }
 
 extern int elf32_nxstack;
Index: sys/powerpc/aim/mmu_radix.c
===================================================================
--- sys/powerpc/aim/mmu_radix.c
+++ sys/powerpc/aim/mmu_radix.c
@@ -3505,8 +3505,6 @@
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			mmu_radix_zero_page(nkpg);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		pde_store(l2e, paddr);
 		continue; /* try again */
@@ -3526,8 +3524,6 @@
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			mmu_radix_zero_page(nkpg);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		pde_store(l3e, paddr);
 
@@ -4228,8 +4224,6 @@
 		 */
 		return (NULL);
 	}
-	if ((m->flags & PG_ZERO) == 0)
-		mmu_radix_zero_page(m);
 
 	/*
 	 * Map the pagetable page into the process address space, if
Index: sys/powerpc/aim/slb.c
===================================================================
--- sys/powerpc/aim/slb.c
+++ sys/powerpc/aim/slb.c
@@ -512,10 +512,6 @@
 		va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
 		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
 	}
-
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero(va, PAGE_SIZE);
-
 	return (va);
 }
 
Index: sys/powerpc/booke/pmap_64.c
===================================================================
--- sys/powerpc/booke/pmap_64.c
+++ sys/powerpc/booke/pmap_64.c
@@ -168,11 +168,6 @@
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pmap);
 	}
-
-	if (!(m->flags & PG_ZERO))
-		/* Zero whole ptbl. */
-		mmu_booke_zero_page(m);
-
 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
 }
 
Index: sys/powerpc/powerpc/uma_machdep.c
===================================================================
--- sys/powerpc/powerpc/uma_machdep.c
+++ sys/powerpc/powerpc/uma_machdep.c
@@ -71,9 +71,6 @@
 	} else {
 		va = (void *)(vm_offset_t)PHYS_TO_DMAP(pa);
 	}
-
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero(va, PAGE_SIZE);
 	atomic_add_int(&hw_uma_mdpages, 1);
 
 	return (va);
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -1209,9 +1209,6 @@
 	pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
 	pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);
 
-	if ((l1pt->flags & PG_ZERO) == 0)
-		pagezero(pmap->pm_l1);
-
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 
 	CPU_ZERO(&pmap->pm_active);
@@ -1271,9 +1268,6 @@
 		return (NULL);
 	}
 
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
-
 	/*
 	 * Map the pagetable page into the process address space, if
 	 * it isn't already there.
@@ -1464,8 +1458,6 @@
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			pmap_zero_page(nkpg);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		pn = (paddr / PAGE_SIZE);
 
@@ -1492,9 +1484,6 @@
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0) {
-			pmap_zero_page(nkpg);
-		}
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		pn = (paddr / PAGE_SIZE);
 
@@ -2706,8 +2695,6 @@
 		    VM_ALLOC_ZERO);
 		if (l2_m == NULL)
 			panic("pmap_enter: l2 pte_m == NULL");
-		if ((l2_m->flags & PG_ZERO) == 0)
-			pmap_zero_page(l2_m);
 
 		l2_pa = VM_PAGE_TO_PHYS(l2_m);
 		l2_pn = (l2_pa / PAGE_SIZE);
@@ -2724,8 +2711,6 @@
 		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (l3_m == NULL)
 			panic("pmap_enter: l3 pte_m == NULL");
-		if ((l3_m->flags & PG_ZERO) == 0)
-			pmap_zero_page(l3_m);
 
 		l3_pa = VM_PAGE_TO_PHYS(l3_m);
 		l3_pn = (l3_pa / PAGE_SIZE);
Index: sys/riscv/riscv/uma_machdep.c
===================================================================
--- sys/riscv/riscv/uma_machdep.c
+++ sys/riscv/riscv/uma_machdep.c
@@ -42,7 +42,6 @@
 {
 	vm_page_t m;
 	vm_paddr_t pa;
-	void *va;
 
 	*flags = UMA_SLAB_PRIV;
 	m = vm_page_alloc_domain(NULL, 0, domain,
@@ -52,10 +51,7 @@
 	pa = m->phys_addr;
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
-	va = (void *)PHYS_TO_DMAP(pa);
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero(va, PAGE_SIZE);
-	return (va);
+	return ((void *)PHYS_TO_DMAP(pa));
 }
 
 void
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -244,8 +244,6 @@
 		KASSERT(vm_page_domain(m) == domain,
 		    ("kmem_alloc_attr_domain: Domain mismatch %d != %d",
 		    vm_page_domain(m), domain));
-		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
-			pmap_zero_page(m);
 		vm_page_valid(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
@@ -324,8 +322,6 @@
 	end_m = m + npages;
 	tmp = addr;
 	for (; m < end_m; m++) {
-		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
-			pmap_zero_page(m);
 		vm_page_valid(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
 		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
@@ -497,8 +493,6 @@
 		KASSERT(vm_page_domain(m) == domain,
 		    ("kmem_back_domain: Domain mismatch %d != %d",
 		    vm_page_domain(m), domain));
-		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
-			pmap_zero_page(m);
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
 		vm_page_valid(m);
@@ -692,8 +686,6 @@
 	addr = kva_alloc(ZERO_REGION_SIZE);
 	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
 	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
 		pmap_qenter(addr + i, &m, 1);
 	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);
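
Every hunk above deletes the same caller-side idiom: re-checking PG_ZERO after an
allocation that already passed VM_ALLOC_ZERO. The sketch below (illustrative only,
not part of the patch) restates the before/after shape of a typical call site, on
the assumption that vm_page_alloc() and its variants now zero the page themselves
when VM_ALLOC_ZERO is requested and the page does not arrive pre-zeroed from the
free queues:

	/* Before: VM_ALLOC_ZERO was only a hint; callers had to re-check. */
	m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);
	if ((m->flags & PG_ZERO) == 0)	/* page may hold stale contents */
		pmap_zero_page(m);	/* zero it through the direct map */

	/*
	 * After: the allocator honors VM_ALLOC_ZERO itself, so the PG_ZERO
	 * re-check is dead code and the call site collapses to the bare
	 * allocation.
	 */
	m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);

The same reasoning appears to cover the M_ZERO fallbacks removed from the
uma_machdep.c and vm_kern.c hunks: where the underlying page allocation already
carries VM_ALLOC_ZERO (for example via malloc2vm_flags(), as in the mips
uma_machdep.c hunk), the trailing bzero()/pagezero() of the direct-map address
is redundant.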