Index: head/share/man/man9/Makefile
===================================================================
--- head/share/man/man9/Makefile
+++ head/share/man/man9/Makefile
@@ -1391,8 +1391,7 @@
 MLINKS+=pmap_remove.9 pmap_remove_all.9 \
 	pmap_remove.9 pmap_remove_pages.9
 MLINKS+=pmap_resident_count.9 pmap_wired_count.9
-MLINKS+=pmap_zero_page.9 pmap_zero_area.9 \
-	pmap_zero_page.9 pmap_zero_idle.9
+MLINKS+=pmap_zero_page.9 pmap_zero_area.9
 MLINKS+=printf.9 log.9 \
 	printf.9 tprintf.9 \
 	printf.9 uprintf.9
Index: head/share/man/man9/pmap.9
===================================================================
--- head/share/man/man9/pmap.9
+++ head/share/man/man9/pmap.9
@@ -25,7 +25,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd August 3, 2014
+.Dd August 30, 2016
 .Dt PMAP 9
 .Os
 .Sh NAME
@@ -121,7 +121,6 @@
 .Xr pmap_unwire 9 ,
 .Xr pmap_wired_count 9 ,
 .Xr pmap_zero_area 9 ,
-.Xr pmap_zero_idle 9 ,
 .Xr pmap_zero_page 9 ,
 .Xr vm_map 9
 .Sh AUTHORS
Index: head/share/man/man9/pmap_zero_page.9
===================================================================
--- head/share/man/man9/pmap_zero_page.9
+++ head/share/man/man9/pmap_zero_page.9
@@ -25,13 +25,12 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd July 21, 2003
+.Dd August 30, 2016
 .Dt PMAP_ZERO 9
 .Os
 .Sh NAME
 .Nm pmap_zero_page ,
-.Nm pmap_zero_area ,
-.Nm pmap_zero_idle
+.Nm pmap_zero_area
 .Nd zero-fill a page using machine-dependent optimizations
 .Sh SYNOPSIS
 .In sys/param.h
@@ -41,8 +40,6 @@
 .Fn pmap_zero_page "vm_page_t m"
 .Ft void
 .Fn pmap_zero_page_area "vm_page_t m" "int off" "int size"
-.Ft void
-.Fn pmap_zero_page_idle "vm_page_t m"
 .Sh DESCRIPTION
 The
 .Fn pmap_zero_page
@@ -53,14 +50,6 @@
 The range specified must not cross a page boundary; it must be
 contained entirely within a single page.
 .Pp
-The
-.Fn pmap_zero_page_idle
-interface is used by the
-.Nm vm_pagezero
-process.
-The system-wide
-.Va Giant
-lock should not be required to be held in order to call this interface.
 .Sh IMPLEMENTATION NOTES
 This function is required to be implemented for each architecture supported by
 .Fx .
Index: head/sys/amd64/amd64/pmap.c
===================================================================
--- head/sys/amd64/amd64/pmap.c
+++ head/sys/amd64/amd64/pmap.c
@@ -5179,19 +5179,6 @@
 }
 
 /*
- * Zero the specified hardware page in a way that minimizes cache thrashing.
- * This is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-	sse2_pagezero((void *)va);
-}
-
-/*
  * Copy 1 specified hardware page to another.
  */
 void
Index: head/sys/arm/arm/pmap-v4.c
===================================================================
--- head/sys/arm/arm/pmap-v4.c
+++ head/sys/arm/arm/pmap-v4.c
@@ -4079,19 +4079,6 @@
 }
 
 
-/*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
- * the page into KVM and using bzero to clear its contents.  This
- * is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-	pmap_zero_page(m);
-}
-
 #if 0
 /*
  * pmap_clean_page()
Index: head/sys/arm/arm/pmap-v6.c
===================================================================
--- head/sys/arm/arm/pmap-v6.c
+++ head/sys/arm/arm/pmap-v6.c
@@ -306,8 +306,6 @@
 	caddr_t	CADDR3;
 };
 static struct sysmaps sysmaps_pcpu[MAXCPU];
-static pt2_entry_t *CMAP3;
-static caddr_t CADDR3;
 caddr_t _tmppt = 0;
 
 struct msgbuf *msgbufp = NULL; /* XXX move it to machdep.c */
@@ -1176,7 +1174,6 @@
 	/*
 	 * Local CMAP1/CMAP2 are used for zeroing and copying pages.
 	 * Local CMAP3 is used for data cache cleaning.
-	 * Global CMAP3 is used for the idle process page zeroing.
 	 */
 	for (i = 0; i < MAXCPU; i++) {
 		sysmaps = &sysmaps_pcpu[i];
@@ -1185,7 +1182,6 @@
 		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1);
 		SYSMAP(caddr_t, sysmaps->CMAP3, sysmaps->CADDR3, 1);
 	}
-	SYSMAP(caddr_t, CMAP3, CADDR3, 1);
 
 	/*
 	 * Crashdump maps.
@@ -5805,27 +5801,6 @@
 }
 
 /*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
- * the page into KVM and using bzero to clear its contents.  This
- * is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-	if (pte2_load(CMAP3) != 0)
-		panic("%s: CMAP3 busy", __func__);
-	sched_pin();
-	pte2_store(CMAP3, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
-	    vm_page_pte2_attr(m)));
-	pagezero(CADDR3);
-	pte2_clear(CMAP3);
-	tlb_flush((vm_offset_t)CADDR3);
-	sched_unpin();
-}
-
-/*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
  * bcopy to copy the page, one machine dependent page at a
Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -3264,20 +3264,6 @@
 }
 
 /*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
- * the page into KVM and using bzero to clear its contents.  This
- * is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-	pagezero((void *)va);
-}
-
-/*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
  * bcopy to copy the page, one machine dependent page at a
Index: head/sys/conf/files
===================================================================
--- head/sys/conf/files
+++ head/sys/conf/files
@@ -4369,7 +4369,6 @@
 vm/vm_reserv.c			standard
 vm/vm_domain.c			standard
 vm/vm_unix.c			standard
-vm/vm_zeroidle.c		standard
 vm/vnode_pager.c		standard
 xen/features.c			optional xenhvm
 xen/xenbus/xenbus_if.m		optional xenhvm
Index: head/sys/i386/i386/pmap.c
===================================================================
--- head/sys/i386/i386/pmap.c
+++ head/sys/i386/i386/pmap.c
@@ -444,7 +444,7 @@
 
 	/*
 	 * CMAP1/CMAP2 are used for zeroing and copying pages.
-	 * CMAP3 is used for the idle process page zeroing.
+	 * CMAP3 is used for the boot-time memory test.
 	 */
 	for (i = 0; i < MAXCPU; i++) {
 		sysmaps = &sysmaps_pcpu[i];
@@ -452,7 +452,7 @@
 		SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
 		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
 	}
-	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
+	SYSMAP(caddr_t, CMAP3, CADDR3, 1);
 
 	/*
 	 * Crashdump maps.
@@ -4242,26 +4242,6 @@
 }
 
 /*
- * Zero the specified hardware page in a way that minimizes cache thrashing.
- * This is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-	if (*CMAP3)
-		panic("pmap_zero_page_idle: CMAP3 busy");
-	sched_pin();
-	*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(m->md.pat_mode, 0);
-	invlcaddr(CADDR3);
-	pagezero(CADDR3);
-	*CMAP3 = 0;
-	sched_unpin();
-}
-
-/*
  * Copy 1 specified hardware page to another.
  */
 void
Index: head/sys/i386/include/pmap.h
===================================================================
--- head/sys/i386/include/pmap.h
+++ head/sys/i386/include/pmap.h
@@ -353,7 +353,7 @@
 
 #ifdef _KERNEL
 
-extern	caddr_t	CADDR3;
+extern	caddr_t CADDR3;
 extern pt_entry_t *CMAP3;
 extern vm_paddr_t phys_avail[];
 extern vm_paddr_t dump_avail[];
Index: head/sys/mips/mips/pmap.c
===================================================================
--- head/sys/mips/mips/pmap.c
+++ head/sys/mips/mips/pmap.c
@@ -2558,24 +2558,6 @@
 	}
 }
 
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va;
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-
-	if (MIPS_DIRECT_MAPPABLE(phys)) {
-		va = MIPS_PHYS_TO_DIRECT(phys);
-		bzero((caddr_t)va, PAGE_SIZE);
-		mips_dcache_wbinv_range(va, PAGE_SIZE);
-	} else {
-		va = pmap_lmem_map1(phys);
-		bzero((caddr_t)va, PAGE_SIZE);
-		mips_dcache_wbinv_range(va, PAGE_SIZE);
-		pmap_lmem_unmap();
-	}
-}
-
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
Index: head/sys/powerpc/aim/mmu_oea.c
===================================================================
--- head/sys/powerpc/aim/mmu_oea.c
+++ head/sys/powerpc/aim/mmu_oea.c
@@ -300,7 +300,6 @@
 void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea_zero_page(mmu_t, vm_page_t);
 void moea_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea_zero_page_idle(mmu_t, vm_page_t);
 void moea_activate(mmu_t, struct thread *);
 void moea_deactivate(mmu_t, struct thread *);
 void moea_cpu_bootstrap(mmu_t, int);
@@ -349,7 +348,6 @@
 	MMUMETHOD(mmu_unwire,		moea_unwire),
 	MMUMETHOD(mmu_zero_page,	moea_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
-	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
 	MMUMETHOD(mmu_activate,		moea_activate),
 	MMUMETHOD(mmu_deactivate,	moea_deactivate),
 	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),
@@ -1081,13 +1079,6 @@
 	bzero(va, size);
 }
 
-void
-moea_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-
-	moea_zero_page(mmu, m);
-}
-
 vm_offset_t
 moea_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
Index: head/sys/powerpc/aim/mmu_oea64.c
===================================================================
--- head/sys/powerpc/aim/mmu_oea64.c
+++ head/sys/powerpc/aim/mmu_oea64.c
@@ -265,7 +265,6 @@
 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea64_zero_page(mmu_t, vm_page_t);
 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea64_zero_page_idle(mmu_t, vm_page_t);
 void moea64_activate(mmu_t, struct thread *);
 void moea64_deactivate(mmu_t, struct thread *);
 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
@@ -314,7 +313,6 @@
 	MMUMETHOD(mmu_unwire,		moea64_unwire),
 	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
-	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
 	MMUMETHOD(mmu_activate,		moea64_activate),
 	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
 	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),
@@ -1230,13 +1228,6 @@
 	mtx_unlock(&moea64_scratchpage_mtx);
 }
 
-void
-moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-
-	moea64_zero_page(mmu, m);
-}
-
 vm_offset_t
 moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
Index: head/sys/powerpc/booke/pmap.c
===================================================================
--- head/sys/powerpc/booke/pmap.c
+++ head/sys/powerpc/booke/pmap.c
@@ -130,12 +130,6 @@
 
 static struct mtx tlbivax_mutex;
 
-/*
- * Reserved KVA space for mmu_booke_zero_page_idle. This is used
- * by idle thred only, no lock required.
- */
-static vm_offset_t zero_page_idle_va;
-
 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
 static vm_offset_t copy_page_src_va;
 static vm_offset_t copy_page_dst_va;
@@ -312,7 +306,6 @@
 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 static void mmu_booke_zero_page(mmu_t, vm_page_t);
 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
-static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
 static void mmu_booke_activate(mmu_t, struct thread *);
 static void mmu_booke_deactivate(mmu_t, struct thread *);
 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
@@ -371,7 +364,6 @@
 	MMUMETHOD(mmu_unwire,		mmu_booke_unwire),
 	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
 	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
-	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
 	MMUMETHOD(mmu_activate,		mmu_booke_activate),
 	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
 	MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
@@ -1147,14 +1139,11 @@
 	/* Allocate KVA space for page zero/copy operations. */
 	zero_page_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
-	zero_page_idle_va = virtual_avail;
-	virtual_avail += PAGE_SIZE;
 	copy_page_src_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
 	copy_page_dst_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
 	debugf("zero_page_va = 0x%08x\n", zero_page_va);
-	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
 	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
 	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
 
@@ -2326,23 +2315,6 @@
 	mtx_unlock(&copy_page_mutex);
 }
 
-/*
- * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
- * into virtual memory and using bzero to clear its contents. This is intended
- * to be called from the vm_pagezero process only and outside of Giant. No
- * lock is required.
- */
-static void
-mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-	vm_offset_t va;
-
-	va = zero_page_idle_va;
-	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
-	bzero((caddr_t)va, PAGE_SIZE);
-	mmu_booke_kremove(mmu, va);
-}
-
 static vm_offset_t
 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
Index: head/sys/powerpc/powerpc/mmu_if.m
===================================================================
--- head/sys/powerpc/powerpc/mmu_if.m
+++ head/sys/powerpc/powerpc/mmu_if.m
@@ -659,18 +659,6 @@
 
 
 /**
- * @brief Called from the idle loop to zero pages.  XXX I think locking
- * constraints might be different here compared to zero_page.
- *
- * @param _pg		physical page
- */
-METHOD void zero_page_idle {
-	mmu_t		_mmu;
-	vm_page_t	_pg;
-};
-
-
-/**
  * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
Index: head/sys/powerpc/powerpc/pmap_dispatch.c
===================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c
+++ head/sys/powerpc/powerpc/pmap_dispatch.c
@@ -380,14 +380,6 @@
 	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
 }
 
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
-	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
-}
-
 int
 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
Index: head/sys/riscv/riscv/pmap.c
===================================================================
--- head/sys/riscv/riscv/pmap.c
+++ head/sys/riscv/riscv/pmap.c
@@ -2538,20 +2538,6 @@
 }
 
 /*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
- * the page into KVM and using bzero to clear its contents.  This
- * is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-	pagezero((void *)va);
-}
-
-/*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
  * bcopy to copy the page, one machine dependent page at a
Index: head/sys/sparc64/sparc64/pmap.c
===================================================================
--- head/sys/sparc64/sparc64/pmap.c
+++ head/sys/sparc64/sparc64/pmap.c
@@ -223,10 +223,6 @@
 PMAP_STATS_VAR(pmap_nzero_page_area_c);
 PMAP_STATS_VAR(pmap_nzero_page_area_oc);
 PMAP_STATS_VAR(pmap_nzero_page_area_nc);
-PMAP_STATS_VAR(pmap_nzero_page_idle);
-PMAP_STATS_VAR(pmap_nzero_page_idle_c);
-PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
-PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
 PMAP_STATS_VAR(pmap_ncopy_page);
 PMAP_STATS_VAR(pmap_ncopy_page_c);
 PMAP_STATS_VAR(pmap_ncopy_page_oc);
@@ -1849,35 +1845,6 @@
 }
 
 void
-pmap_zero_page_idle(vm_page_t m)
-{
-	struct tte *tp;
-	vm_offset_t va;
-	vm_paddr_t pa;
-
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_zero_page_idle: fake page"));
-	PMAP_STATS_INC(pmap_nzero_page_idle);
-	pa = VM_PAGE_TO_PHYS(m);
-	if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
-		PMAP_STATS_INC(pmap_nzero_page_idle_c);
-		va = TLB_PHYS_TO_DIRECT(pa);
-		cpu_block_zero((void *)va, PAGE_SIZE);
-	} else if (m->md.color == -1) {
-		PMAP_STATS_INC(pmap_nzero_page_idle_nc);
-		aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
-	} else {
-		PMAP_STATS_INC(pmap_nzero_page_idle_oc);
-		va = pmap_idle_map + (m->md.color * PAGE_SIZE);
-		tp = tsb_kvtotte(va);
-		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
-		tp->tte_vpn = TV_VPN(va, TS_8K);
-		cpu_block_zero((void *)va, PAGE_SIZE);
-		tlb_page_demap(kernel_pmap, va);
-	}
-}
-
-void
 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
 	vm_offset_t vdst;
Index: head/sys/vm/pmap.h
===================================================================
--- head/sys/vm/pmap.h
+++ head/sys/vm/pmap.h
@@ -153,7 +153,6 @@
 void		 pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end);
 void		 pmap_zero_page(vm_page_t);
 void		 pmap_zero_page_area(vm_page_t, int off, int size);
-void		 pmap_zero_page_idle(vm_page_t);
 #define	pmap_resident_count(pm)	((pm)->pm_stats.resident_count)
 #define	pmap_wired_count(pm)	((pm)->pm_stats.wired_count)
 
Index: head/sys/vm/vm_meter.c
===================================================================
--- head/sys/vm/vm_meter.c
+++ head/sys/vm/vm_meter.c
@@ -306,6 +306,3 @@
 VM_STATS_VM(v_vforkpages, "VM pages affected by vfork()");
 VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()");
 VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel");
-
-SYSCTL_INT(_vm_stats_misc, OID_AUTO, zero_page_count, CTLFLAG_RD,
-    &vm_page_zero_count, 0, "Number of zero-ed free pages");
Index: head/sys/vm/vm_page.h
===================================================================
--- head/sys/vm/vm_page.h
+++ head/sys/vm/vm_page.h
@@ -504,7 +504,6 @@
 vm_page_bits_t vm_page_bits(int base, int size);
 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
 void vm_page_free_toq(vm_page_t m);
-void vm_page_zero_idle_wakeup(void);
 void vm_page_dirty_KBI(vm_page_t m);
 void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
 void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
Index: head/sys/vm/vm_page.c
===================================================================
--- head/sys/vm/vm_page.c
+++ head/sys/vm/vm_page.c
@@ -134,7 +134,6 @@
 vm_page_t vm_page_array;
 long vm_page_array_size;
 long first_page;
-int vm_page_zero_count;
 
 static int boot_pages = UMA_BOOT_PAGES;
 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
@@ -1735,8 +1734,6 @@
 		KASSERT(m->valid == 0,
 		    ("vm_page_alloc: free page %p is valid", m));
 		vm_phys_freecnt_adj(m, -1);
-		if ((m->flags & PG_ZERO) != 0)
-			vm_page_zero_count--;
 	}
 	mtx_unlock(&vm_page_queue_free_mtx);
 
@@ -2042,8 +2039,6 @@
 		KASSERT(m->valid == 0,
 		    ("vm_page_alloc_init: free page %p is valid", m));
 		vm_phys_freecnt_adj(m, -1);
-		if ((m->flags & PG_ZERO) != 0)
-			vm_page_zero_count--;
 	}
 	return (drop);
 }
@@ -2597,7 +2592,6 @@
 #endif
 			vm_phys_free_pages(m, 0);
 		} while ((m = SLIST_FIRST(&free)) != NULL);
-		vm_page_zero_idle_wakeup();
 	vm_page_free_wakeup();
 	mtx_unlock(&vm_page_queue_free_mtx);
 }
@@ -3041,10 +3035,6 @@
 	if (TRUE)
 #endif
 		vm_phys_free_pages(m, 0);
-	if ((m->flags & PG_ZERO) != 0)
-		++vm_page_zero_count;
-	else
-		vm_page_zero_idle_wakeup();
 	vm_page_free_wakeup();
 	mtx_unlock(&vm_page_queue_free_mtx);
 }
Index: head/sys/vm/vm_phys.h
===================================================================
--- head/sys/vm/vm_phys.h
+++ head/sys/vm/vm_phys.h
@@ -88,7 +88,6 @@
     u_long alignment, vm_paddr_t boundary, int options);
 void vm_phys_set_pool(int pool, vm_page_t m, int order);
 boolean_t vm_phys_unfree_page(vm_page_t m);
-boolean_t vm_phys_zero_pages_idle(void);
 int vm_phys_mem_affinity(int f, int t);
 
 /*
Index: head/sys/vm/vm_phys.c
===================================================================
--- head/sys/vm/vm_phys.c
+++ head/sys/vm/vm_phys.c
@@ -132,10 +132,6 @@
 CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
 #endif
 
-static int cnt_prezero;
-SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
-    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");
-
 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
 SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
     NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");
@@ -1298,53 +1294,6 @@
 }
 
 /*
- * Try to zero one physical page.  Used by an idle priority thread.
- */
-boolean_t
-vm_phys_zero_pages_idle(void)
-{
-	static struct vm_freelist *fl;
-	static int flind, oind, pind;
-	vm_page_t m, m_tmp;
-	int domain;
-
-	domain = vm_rr_selectdomain();
-	fl = vm_phys_free_queues[domain][0][0];
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	for (;;) {
-		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
-			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
-				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
-					vm_phys_unfree_page(m_tmp);
-					vm_phys_freecnt_adj(m, -1);
-					mtx_unlock(&vm_page_queue_free_mtx);
-					pmap_zero_page_idle(m_tmp);
-					m_tmp->flags |= PG_ZERO;
-					mtx_lock(&vm_page_queue_free_mtx);
-					vm_phys_freecnt_adj(m, 1);
-					vm_phys_free_pages(m_tmp, 0);
-					vm_page_zero_count++;
-					cnt_prezero++;
-					return (TRUE);
-				}
-			}
-		}
-		oind++;
-		if (oind == VM_NFREEORDER) {
-			oind = 0;
-			pind++;
-			if (pind == VM_NFREEPOOL) {
-				pind = 0;
-				flind++;
-				if (flind == vm_nfreelists)
-					flind = 0;
-			}
-			fl = vm_phys_free_queues[domain][flind][pind];
-		}
-	}
-}
-
-/*
  * Allocate a contiguous set of physical pages of the given size
  * "npages" from the free lists.  All of the physical pages must be at
  * or above the given physical address "low" and below the given
Index: head/sys/vm/vm_zeroidle.c
===================================================================
--- head/sys/vm/vm_zeroidle.c
+++ head/sys/vm/vm_zeroidle.c
@@ -1,162 +0,0 @@
-/*-
- * Copyright (c) 1994 John Dyson
- * Copyright (c) 2001 Matt Dillon
- *
- * All Rights Reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
- * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * from: FreeBSD: .../i386/vm_machdep.c,v 1.165 2001/07/04 23:27:04 dillon
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include "opt_sched.h"
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/kthread.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/proc.h>
-#include <sys/sched.h>
-#include <sys/sysctl.h>
-#include <sys/unistd.h>
-#include <sys/vmmeter.h>
-
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
-
-static int idlezero_enable_default = 0;
-/* Defer setting the enable flag until the kthread is running. */
-static int idlezero_enable = 0;
-SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RWTUN, &idlezero_enable, 0,
-    "Allow the kernel to use idle cpu cycles to zero-out pages");
-/*
- * Implement the pre-zeroed page mechanism.
- */
-
-#define ZIDLE_LO(v)	((v) * 2 / 3)
-#define ZIDLE_HI(v)	((v) * 4 / 5)
-
-static boolean_t wakeup_needed = FALSE;
-static int zero_state;
-
-static int
-vm_page_zero_check(void)
-{
-
-	if (!idlezero_enable)
-		return (0);
-	/*
-	 * Attempt to maintain approximately 1/2 of our free pages in a
-	 * PG_ZERO'd state. Add some hysteresis to (attempt to) avoid
-	 * generally zeroing a page when the system is near steady-state.
-	 * Otherwise we might get 'flutter' during disk I/O / IPC or
-	 * fast sleeps. We also do not want to be continuously zeroing
-	 * pages because doing so may flush our L1 and L2 caches too much.
-	 */
-	if (zero_state && vm_page_zero_count >= ZIDLE_LO(vm_cnt.v_free_count))
-		return (0);
-	if (vm_page_zero_count >= ZIDLE_HI(vm_cnt.v_free_count))
-		return (0);
-	return (1);
-}
-
-static void
-vm_page_zero_idle(void)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	zero_state = 0;
-	if (vm_phys_zero_pages_idle()) {
-		if (vm_page_zero_count >= ZIDLE_HI(vm_cnt.v_free_count))
-			zero_state = 1;
-	}
-}
-
-/* Called by vm_page_free to hint that a new page is available. */
-void
-vm_page_zero_idle_wakeup(void)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	if (wakeup_needed && vm_page_zero_check()) {
-		wakeup_needed = FALSE;
-		wakeup(&zero_state);
-	}
-}
-
-static void
-vm_pagezero(void __unused *arg)
-{
-
-	idlezero_enable = idlezero_enable_default;
-
-	mtx_lock(&vm_page_queue_free_mtx);
-	for (;;) {
-		if (vm_page_zero_check()) {
-			vm_page_zero_idle();
-#ifndef PREEMPTION
-			if (sched_runnable()) {
-				thread_lock(curthread);
-				mi_switch(SW_VOL | SWT_IDLE, NULL);
-				thread_unlock(curthread);
-			}
-#endif
-		} else {
-			wakeup_needed = TRUE;
-			msleep(&zero_state, &vm_page_queue_free_mtx, 0,
-			    "pgzero", hz * 300);
-		}
-	}
-}
-
-static void
-pagezero_start(void __unused *arg)
-{
-	int error;
-	struct proc *p;
-	struct thread *td;
-
-	error = kproc_create(vm_pagezero, NULL, &p, RFSTOPPED, 0, "pagezero");
-	if (error)
-		panic("pagezero_start: error %d\n", error);
-	td = FIRST_THREAD_IN_PROC(p);
-	thread_lock(td);
-
-	/* We're an idle task, don't count us in the load. */
-	td->td_flags |= TDF_NOLOAD;
-	sched_class(td, PRI_IDLE);
-	sched_prio(td, PRI_MAX_IDLE);
-	sched_add(td, SRQ_BORING);
-	thread_unlock(td);
-}
-SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL);
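
Note on the resulting interface: with vm_zeroidle.c and pmap_zero_page_idle() gone, nothing pre-zeroes free pages in the background anymore. A page allocated with VM_ALLOC_ZERO is zeroed only if the caller notices that PG_ZERO is not set and zeroes it on demand through the machine-dependent pmap_zero_page(). The helper below is a minimal sketch of that surviving idiom, not part of this change; the name alloc_zeroed_page() is hypothetical, while vm_page_alloc(), the VM_ALLOC_* request flags, PG_ZERO, and pmap_zero_page() are the real interfaces touched here.

/*
 * Hypothetical example (not part of the patch): allocate a zeroed,
 * wired page that is not associated with a VM object.  vm_page_alloc()
 * may still hand back a page that happens to have PG_ZERO set, but
 * with the idle-zero kthread removed the common case is that the
 * caller zeroes the page itself via the machine-dependent
 * pmap_zero_page().
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

static vm_page_t
alloc_zeroed_page(void)
{
	vm_page_t m;

	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m != NULL && (m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);	/* zero on demand; no pre-zeroed pool */
	return (m);
}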