Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -2216,20 +2216,6 @@
 /***************************************************
  * Page table page management routines.....
  ***************************************************/
-static __inline void
-pmap_free_zero_pages(struct spglist *free)
-{
-	vm_page_t m;
-	int count;
-
-	for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
-		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-		/* Preserve the page's PG_ZERO setting. */
-		vm_page_free_toq(m);
-	}
-	atomic_subtract_int(&vm_cnt.v_wire_count, count);
-}
-
 /*
  * Schedule the specified unused page table page to be freed.  Specifically,
  * add the page to the specified list of pages that will be released to the
@@ -3082,7 +3068,7 @@
 		/* Recycle a freed page table page. */
 		m_pc->wire_count = 1;
 	}
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 	return (m_pc);
 }
 
@@ -3581,7 +3567,7 @@
 		pmap_remove_pde(pmap, pde, sva, &free, lockp);
 		if ((oldpde & PG_G) == 0)
 			pmap_invalidate_pde_page(pmap, sva, oldpde);
-		pmap_free_zero_pages(&free);
+		vm_page_free_spglist(&free, true);
 		CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
 		    " in pmap %p", va, pmap);
 		return (FALSE);
@@ -3983,7 +3969,7 @@
 	pmap_invalidate_all(pmap);
 	PMAP_UNLOCK(pmap);
 	pmap_delayed_invl_finished();
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 }
 
 /*
@@ -4081,7 +4067,7 @@
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(lock);
 	pmap_delayed_invl_wait(m);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 }
 
 /*
@@ -4749,7 +4735,7 @@
 			pmap_invalidate_all(pmap);
 			pmap_delayed_invl_finished();
 		}
-		pmap_free_zero_pages(&free);
+		vm_page_free_spglist(&free, true);
 		if (va >= VM_MAXUSER_ADDRESS) {
 			mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 			if (pmap_insert_pt_page(pmap, mt)) {
@@ -4778,7 +4764,7 @@
 		 * pages.  Invalidate those entries.
 		 */
 		pmap_invalidate_page(pmap, va);
-		pmap_free_zero_pages(&free);
+		vm_page_free_spglist(&free, true);
 	}
 	CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 	    " in pmap %p", va, pmap);
@@ -4959,7 +4945,7 @@
 			 * pages.  Invalidate those entries.
 			 */
 			pmap_invalidate_page(pmap, va);
-			pmap_free_zero_pages(&free);
+			vm_page_free_spglist(&free, true);
 		}
 		mpte = NULL;
 	}
@@ -5337,7 +5323,8 @@
 					 */
 					pmap_invalidate_page(dst_pmap,
 					    addr);
-					pmap_free_zero_pages(&free);
+					vm_page_free_spglist(&free,
+					    true);
 				}
 				goto out;
 			}
@@ -5755,7 +5742,7 @@
 	rw_wunlock(lock);
 	pmap_invalidate_all(pmap);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 }
 
 static boolean_t
@@ -6240,7 +6227,7 @@
 	    not_cleared < PMAP_TS_REFERENCED_MAX);
out:
 	rw_wunlock(lock);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 	return (cleared + not_cleared);
 }
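A note on the new second argument, restating what the hunks above already show:
the amd64 helper being removed (and the identical i386 one further down)
deferred the v_wire_count accounting and subtracted it once for the whole
batch, which is why every amd64/i386 call site passes true. A condensed
before/after sketch, using only names that appear in this patch:

	/* Old (amd64/i386): free each page, then batch the wire-count update. */
	static __inline void
	pmap_free_zero_pages(struct spglist *free)
	{
		vm_page_t m;
		int count;

		for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
			SLIST_REMOVE_HEAD(free, plinks.s.ss);
			/* Preserve the page's PG_ZERO setting. */
			vm_page_free_toq(m);
		}
		atomic_subtract_int(&vm_cnt.v_wire_count, count);
	}

	/* New: the MI helper performs both steps when update_wire_count is true. */
	vm_page_free_spglist(&free, true);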
Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c
+++ sys/arm/arm/pmap-v6.c
@@ -2544,18 +2544,6 @@
 	return (m);
 }
 
-static __inline void
-pmap_free_zero_pages(struct spglist *free)
-{
-	vm_page_t m;
-
-	while ((m = SLIST_FIRST(free)) != NULL) {
-		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-		/* Preserve the page's PG_ZERO setting. */
-		vm_page_free_toq(m);
-	}
-}
-
 /*
  * Schedule the specified unused L2 page table page to be freed.  Specifically,
  * add the page to the specified list of pages that will be released to the
@@ -2941,7 +2929,7 @@
 		m_pc->wire_count = 1;
 		atomic_add_int(&vm_cnt.v_wire_count, 1);
 	}
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 	return (m_pc);
 }
 
@@ -3704,7 +3692,7 @@
 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) {
 		SLIST_INIT(&free);
 		pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free);
-		pmap_free_zero_pages(&free);
+		vm_page_free_spglist(&free, false);
 		CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p",
 		    __func__, va, pmap);
 		return (FALSE);
@@ -4228,7 +4216,7 @@
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -4302,7 +4290,7 @@
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -4489,7 +4477,7 @@
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -4598,7 +4586,7 @@
 		SLIST_INIT(&free);
 		if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) {
 			pmap_tlb_flush(pmap, va);
-			pmap_free_zero_pages(&free);
+			vm_page_free_spglist(&free, false);
 		}
 
 		mpt2pg = NULL;
@@ -6072,7 +6060,8 @@
 				if (pmap_unwire_pt2(dst_pmap, addr,
 				    dst_mpt2pg, &free)) {
 					pmap_tlb_flush(dst_pmap, addr);
-					pmap_free_zero_pages(&free);
+					vm_page_free_spglist(&free,
+					    false);
 				}
 				goto out;
 			}
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1260,18 +1260,6 @@
 /***************************************************
  * Page table page management routines.....
  ***************************************************/
-static __inline void
-pmap_free_zero_pages(struct spglist *free)
-{
-	vm_page_t m;
-
-	while ((m = SLIST_FIRST(free)) != NULL) {
-		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-		/* Preserve the page's PG_ZERO setting. */
-		vm_page_free_toq(m);
-	}
-}
-
 /*
  * Schedule the specified unused page table page to be freed.  Specifically,
  * add the page to the specified list of pages that will be released to the
@@ -1915,7 +1903,7 @@
 		m_pc->wire_count = 1;
 		atomic_add_int(&vm_cnt.v_wire_count, 1);
 	}
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 	return (m_pc);
 }
 
@@ -2423,7 +2411,7 @@
 	if (lock != NULL)
 		rw_wunlock(lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -2528,7 +2516,7 @@
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(lock);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -3214,7 +3202,7 @@
 			SLIST_INIT(&free);
 			if (pmap_unwire_l3(pmap, va, mpte, &free)) {
 				pmap_invalidate_page(pmap, va);
-				pmap_free_zero_pages(&free);
+				vm_page_free_spglist(&free, false);
 			}
 			mpte = NULL;
 		}
@@ -3728,7 +3716,7 @@
 	if (lock != NULL)
 		rw_wunlock(lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -4189,7 +4177,7 @@
 	    not_cleared < PMAP_TS_REFERENCED_MAX);
out:
 	rw_wunlock(lock);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 	return (cleared + not_cleared);
 }
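The arm and arm64 call sites above pass false instead: those pmaps adjust
v_wire_count one page at a time as page table pages are unwired (note the
per-page atomic_add_int(&vm_cnt.v_wire_count, 1) in their recycle paths),
so no batched subtraction is wanted. The calling convention is otherwise
unchanged; a condensed restatement of the arm64 pattern above:

	struct spglist free;

	SLIST_INIT(&free);
	if (pmap_unwire_l3(pmap, va, mpte, &free)) {
		/* Invalidate before the deferred pages are actually freed. */
		pmap_invalidate_page(pmap, va);
		vm_page_free_spglist(&free, false);
	}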
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -1700,20 +1700,6 @@
 /***************************************************
  * Page table page management routines.....
  ***************************************************/
-static __inline void
-pmap_free_zero_pages(struct spglist *free)
-{
-	vm_page_t m;
-	int count;
-
-	for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
-		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-		/* Preserve the page's PG_ZERO setting. */
-		vm_page_free_toq(m);
-	}
-	atomic_subtract_int(&vm_cnt.v_wire_count, count);
-}
-
 /*
  * Schedule the specified unused page table page to be freed.  Specifically,
  * add the page to the specified list of pages that will be released to the
@@ -2315,7 +2301,7 @@
 		/* Recycle a freed page table page. */
 		m_pc->wire_count = 1;
 	}
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 	return (m_pc);
 }
 
@@ -2665,7 +2651,7 @@
 		pmap_remove_pde(pmap, pde, sva, &free);
 		if ((oldpde & PG_G) == 0)
 			pmap_invalidate_pde_page(pmap, sva, oldpde);
-		pmap_free_zero_pages(&free);
+		vm_page_free_spglist(&free, true);
 		CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
 		    " in pmap %p", va, pmap);
 		return (FALSE);
@@ -3035,7 +3021,7 @@
 	pmap_invalidate_all(pmap);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 }
 
 /*
@@ -3109,7 +3095,7 @@
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 }
 
 /*
@@ -3877,7 +3863,7 @@
 			SLIST_INIT(&free);
 			if (pmap_unwire_ptp(pmap, mpte, &free)) {
 				pmap_invalidate_page(pmap, va);
-				pmap_free_zero_pages(&free);
+				vm_page_free_spglist(&free, true);
 			}
 
 			mpte = NULL;
@@ -4192,7 +4178,8 @@
 				    &free)) {
 					pmap_invalidate_page(dst_pmap,
 					    addr);
-					pmap_free_zero_pages(&free);
+					vm_page_free_spglist(&free,
+					    true);
 				}
 				goto out;
 			}
@@ -4610,7 +4597,7 @@
 	pmap_invalidate_all(pmap);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, true);
 }
 
 /*
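For context on the list type shared by all of these call sites: spglist is
the singly-linked list of vm_page structures declared in vm/vm_page.h,
threaded through each page's plinks.s.ss field, so producers hand pages to
the deferred-free path with the ordinary sys/queue.h macros. A minimal
sketch; enqueue_free_page is a hypothetical name used only for illustration:

	/* Defer freeing "m": link it onto the caller's spglist. */
	static void
	enqueue_free_page(struct spglist *free, vm_page_t m)
	{
		SLIST_INSERT_HEAD(free, m, plinks.s.ss);
	}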
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -1069,18 +1069,6 @@
 /***************************************************
  * Page table page management routines.....
  ***************************************************/
-static __inline void
-pmap_free_zero_pages(struct spglist *free)
-{
-	vm_page_t m;
-
-	while ((m = SLIST_FIRST(free)) != NULL) {
-		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-		/* Preserve the page's PG_ZERO setting. */
-		vm_page_free_toq(m);
-	}
-}
-
 /*
  * Schedule the specified unused page table page to be freed.  Specifically,
  * add the page to the specified list of pages that will be released to the
@@ -1883,7 +1871,7 @@
 	rw_wunlock(lock);
 	rw_runlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -1949,7 +1937,7 @@
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
 	rw_wunlock(&pvh_global_lock);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -2384,7 +2372,7 @@
 			SLIST_INIT(&free);
 			if (pmap_unwire_l3(pmap, va, mpte, &free)) {
 				pmap_invalidate_page(pmap, va);
-				pmap_free_zero_pages(&free);
+				vm_page_free_spglist(&free, false);
 			}
 			mpte = NULL;
 		}
@@ -2790,7 +2778,7 @@
 	rw_wunlock(lock);
 	rw_runlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 }
 
 /*
@@ -3092,7 +3080,7 @@
out:
 	rw_wunlock(lock);
 	rw_runlock(&pvh_global_lock);
-	pmap_free_zero_pages(&free);
+	vm_page_free_spglist(&free, false);
 	return (cleared + not_cleared);
 }
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -547,6 +547,7 @@
 vm_page_bits_t vm_page_bits(int base, int size);
 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
 void vm_page_free_toq(vm_page_t m);
+int vm_page_free_spglist(struct spglist *free, bool update_wire_count);
 void vm_page_dirty_KBI(vm_page_t m);
 void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -2518,15 +2518,11 @@
 	}
 	if (m_mtx != NULL)
 		mtx_unlock(m_mtx);
-	if ((m = SLIST_FIRST(&free)) != NULL) {
-		mtx_lock(&vm_page_queue_free_mtx);
-		do {
-			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
-			vm_page_free_phys(m);
-		} while ((m = SLIST_FIRST(&free)) != NULL);
-		vm_page_free_wakeup();
-		mtx_unlock(&vm_page_queue_free_mtx);
-	}
+	/*
+	 * XXX: unlike the removed loop, this path also runs each page
+	 * through vm_page_free_prep().
+	 */
+	vm_page_free_spglist(&free, false);
 	return (error);
 }
@@ -3055,6 +3048,45 @@
 }
 
 /*
+ * vm_page_free_spglist:
+ *
+ *	Returns the pages on the specified list to the free list,
+ *	disassociating them from any VM object.  In other words, this
+ *	is equivalent to calling vm_page_free_toq() for each page on
+ *	the list.
+ *
+ *	The objects must be locked.  A page must be locked if it is
+ *	managed.
+ *
+ *	Returns the number of pages freed.
+ */
+int
+vm_page_free_spglist(struct spglist *free, bool update_wire_count)
+{
+	struct pglist pgl;
+	vm_page_t m;
+	int count;
+
+	if (SLIST_EMPTY(free))
+		return (0);
+
+	count = 0;
+	TAILQ_INIT(&pgl);
+	while ((m = SLIST_FIRST(free)) != NULL) {
+		count++;
+		SLIST_REMOVE_HEAD(free, plinks.s.ss);
+		if (vm_page_free_prep(m, false))
+			TAILQ_INSERT_TAIL(&pgl, m, listq);
+	}
+
+	/* vm_page_free_phys_pglist() tolerates an empty list. */
+	vm_page_free_phys_pglist(&pgl);
+	if (update_wire_count)
+		atomic_subtract_int(&vm_cnt.v_wire_count, count);
+	return (count);
+}
+
+/*
  * vm_page_wire:
  *
  *	Mark this page as wired down by yet
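Finally, a minimal usage sketch of the new interface: vm_page_free_spglist()
tolerates an empty list and returns the number of pages freed, so callers
need no SLIST_EMPTY() test of their own. The KASSERT is illustrative only:

	struct spglist free;
	int freed;

	SLIST_INIT(&free);
	/* ... accumulate unused page table pages on "free" ... */
	freed = vm_page_free_spglist(&free, true);	/* batched wire count */
	KASSERT(SLIST_EMPTY(&free),
	    ("%d page(s) freed but the spglist is not empty", freed));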