Index: sys/kern/kern_sendfile.c
===================================================================
--- sys/kern/kern_sendfile.c
+++ sys/kern/kern_sendfile.c
@@ -121,60 +121,6 @@
 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
     NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
 
-/*
- * Detach mapped page and release resources back to the system.  Called
- * by mbuf(9) code when last reference to a page is freed.
- */
-static void
-sendfile_free_page(vm_page_t pg, bool nocache)
-{
-	bool freed;
-
-	vm_page_lock(pg);
-	/*
-	 * In either case check for the object going away on us.  This can
-	 * happen since we don't hold a reference to it.  If so, we're
-	 * responsible for freeing the page.  In 'noncache' case try to free
-	 * the page, but only if it is cheap to.
-	 */
-	if (vm_page_unwire_noq(pg)) {
-		vm_object_t obj;
-
-		if ((obj = pg->object) == NULL)
-			vm_page_free(pg);
-		else {
-			freed = false;
-			if (nocache && !vm_page_xbusied(pg) &&
-			    VM_OBJECT_TRYWLOCK(obj)) {
-				/* Only free unmapped pages. */
-				if (obj->ref_count == 0 ||
-				    !pmap_page_is_mapped(pg))
-					/*
-					 * The busy test before the object is
-					 * locked cannot be relied upon.
-					 */
-					freed = vm_page_try_to_free(pg);
-				VM_OBJECT_WUNLOCK(obj);
-			}
-			if (!freed) {
-				/*
-				 * If we were asked to not cache the page, place
-				 * it near the head of the inactive queue so
-				 * that it is reclaimed sooner.  Otherwise,
-				 * maintain LRU.
-				 */
-				if (nocache)
-					vm_page_deactivate_noreuse(pg);
-				else if (vm_page_active(pg))
-					vm_page_reference(pg);
-				else
-					vm_page_deactivate(pg);
-			}
-		}
-	}
-	vm_page_unlock(pg);
-}
-
 static void
 sendfile_free_mext(struct mbuf *m)
 {
@@ -190,7 +136,7 @@
 	nocache = m->m_ext.ext_flags & EXT_FLAG_NOCACHE;
 
 	sf_buf_free(sf);
-	sendfile_free_page(pg, nocache);
+	vm_page_release(pg, nocache);
 
 	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
 		struct sendfile_sync *sfs = m->m_ext.ext_arg2;
@@ -222,7 +168,7 @@
 		if (cache_last && i == ext_pgs->npgs - 1)
 			nocache = false;
 		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
-		sendfile_free_page(pg, nocache);
+		vm_page_release(pg, nocache);
 	}
 
 	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
Index: sys/kern/vfs_bio.c
===================================================================
--- sys/kern/vfs_bio.c
+++ sys/kern/vfs_bio.c
@@ -2943,47 +2943,6 @@
 	}
 }
 
-/*
- * Unwire a page held by a buf and either free it or update the page queues to
- * reflect its recent use.
- */
-static void
-vfs_vmio_unwire(struct buf *bp, vm_page_t m)
-{
-	bool freed;
-
-	vm_page_lock(m);
-	if (vm_page_unwire_noq(m)) {
-		if ((bp->b_flags & B_DIRECT) != 0)
-			freed = vm_page_try_to_free(m);
-		else
-			freed = false;
-		if (!freed) {
-			/*
-			 * Use a racy check of the valid bits to determine
-			 * whether we can accelerate reclamation of the page.
-			 * The valid bits will be stable unless the page is
-			 * being mapped or is referenced by multiple buffers,
-			 * and in those cases we expect races to be rare.  At
-			 * worst we will either accelerate reclamation of a
-			 * valid page and violate LRU, or unnecessarily defer
-			 * reclamation of an invalid page.
-			 *
-			 * The B_NOREUSE flag marks data that is not expected to
-			 * be reused, so accelerate reclamation in that case
-			 * too.  Otherwise, maintain LRU.
-			 */
-			if (m->valid == 0 || (bp->b_flags & B_NOREUSE) != 0)
-				vm_page_deactivate_noreuse(m);
-			else if (vm_page_active(m))
-				vm_page_reference(m);
-			else
-				vm_page_deactivate(m);
-		}
-	}
-	vm_page_unlock(m);
-}
-
 /*
  * Perform page invalidation when a buffer is released.  The fully invalid
  * pages will be reclaimed later in vfs_vmio_truncate().
@@ -3033,7 +2992,7 @@
 		}
 		if (pmap_page_wired_mappings(m) == 0)
 			vm_page_set_invalid(m, poffset, presid);
-		vfs_vmio_unwire(bp, m);
+		vm_page_release_locked(m, (bp->b_flags & B_NOREUSE) != 0);
 		resid -= presid;
 		poffset = 0;
 	}
@@ -3071,7 +3030,10 @@
 		m = bp->b_pages[i];
 		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
 		bp->b_pages[i] = NULL;
-		vfs_vmio_unwire(bp, m);
+		if (obj != NULL)
+			vm_page_release_locked(m, true);
+		else
+			vm_page_release(m, (bp->b_flags & B_NOREUSE) != 0);
 	}
 	if (obj != NULL)
 		VM_OBJECT_WUNLOCK(obj);
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -562,8 +562,10 @@
 bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 void vm_page_reference(vm_page_t m);
+void vm_page_release(vm_page_t m, bool nocache);
+void vm_page_release_locked(vm_page_t m, bool nocache);
 bool vm_page_remove(vm_page_t);
-int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
+int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
 vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
     vm_pindex_t pindex);
 void vm_page_requeue(vm_page_t m);
@@ -574,7 +576,6 @@
 int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
 void vm_page_sunbusy(vm_page_t m);
-bool vm_page_try_to_free(vm_page_t m);
 int vm_page_trysbusy(vm_page_t m);
 void vm_page_unhold_pages(vm_page_t *ma, int count);
 void vm_page_unswappable(vm_page_t m);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -168,9 +168,9 @@
     vm_page_t m_run, vm_paddr_t high);
 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
     int req);
-static int vm_page_import(void *arg, void **store, int cnt, int domain,
+static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
     int flags);
-static void vm_page_release(void *arg, void **store, int cnt);
+static void vm_page_zone_release(void *arg, void **store, int cnt);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
 
@@ -210,7 +210,7 @@
 	pgcache->pool = pool;
 	pgcache->zone = uma_zcache_create("vm pgcache",
 	    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
-	    vm_page_import, vm_page_release, pgcache,
+	    vm_page_zone_import, vm_page_zone_release, pgcache,
 	    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
 	(void)uma_zone_set_maxcache(pgcache->zone, 0);
 }
@@ -2208,7 +2208,7 @@
 }
 
 static int
-vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
+vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
@@ -2231,7 +2231,7 @@
 }
 
 static void
-vm_page_release(void *arg, void **store, int cnt)
+vm_page_zone_release(void *arg, void **store, int cnt)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
@@ -3747,31 +3747,104 @@
 	vm_page_enqueue(m, PQ_UNSWAPPABLE);
 }
 
-/*
- * Attempt to free the page.  If it cannot be freed, do nothing.  Returns true
- * if the page is freed and false otherwise.
- *
- * The page must be managed.  The page and its containing object must be
- * locked.
- */
-bool
+static bool
 vm_page_try_to_free(vm_page_t m)
 {
 
 	vm_page_assert_locked(m);
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m));
-	if (m->dirty != 0 || vm_page_wired(m) || vm_page_busied(m))
+
+	if (m->dirty != 0 || vm_page_busied(m))
 		return (false);
-	if (m->object->ref_count != 0) {
-		pmap_remove_all(m);
-		if (m->dirty != 0)
-			return (false);
-	}
 	vm_page_free(m);
 	return (true);
 }
 
+/*
+ * Unwire a page and either attempt to free it or re-add it to the page queues.
+ */
+void
+vm_page_release(vm_page_t m, bool nocache)
+{
+	vm_object_t object;
+	bool freed;
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("vm_page_release: page %p is unmanaged", m));
+
+	vm_page_lock(m);
+	if (vm_page_unwire_noq(m)) {
+		if ((object = m->object) == NULL) {
+			vm_page_free(m);
+		} else {
+			freed = false;
+			if (nocache && !vm_page_xbusied(m) &&
+			    VM_OBJECT_TRYWLOCK(object)) {
+				/* Only free unmapped pages. */
+				if (object->ref_count == 0 ||
+				    !pmap_page_is_mapped(m))
+					/*
+					 * The busy test before the object is
+					 * locked cannot be relied upon.
+					 */
+					freed = vm_page_try_to_free(m);
+				VM_OBJECT_WUNLOCK(object);
+			}
+
+			if (!freed) {
+				/*
+				 * Use a racy check of the valid bits to
+				 * determine whether we should accelerate
+				 * reclamation of the page.  At worst we will
+				 * either accelerate reclamation of a valid page
+				 * and violate LRU, or unnecessarily defer
+				 * reclamation of an invalid page.
+				 *
+				 * If we were asked to not cache the page, place
+				 * it near the head of the inactive queue so
+				 * that it is reclaimed sooner.
+				 */
+				if (nocache || m->valid == 0)
+					vm_page_deactivate_noreuse(m);
+				else if (vm_page_active(m))
+					vm_page_reference(m);
+				else
+					vm_page_deactivate(m);
+			}
+		}
+	}
+	vm_page_unlock(m);
+}
+
+/* See vm_page_release(). */
+void
+vm_page_release_locked(vm_page_t m, bool nocache)
+{
+	bool freed;
+
+	VM_OBJECT_ASSERT_WLOCKED(m->object);
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("vm_page_release_locked: page %p is unmanaged", m));
+
+	vm_page_lock(m);
+	if (vm_page_unwire_noq(m)) {
+		freed = false;
+		nocache |= m->valid == 0;
+		if (nocache && (m->object->ref_count == 0 ||
+		    !pmap_page_is_mapped(m)))
+			freed = vm_page_try_to_free(m);
+		if (!freed) {
+			if (nocache)
+				vm_page_deactivate_noreuse(m);
+			else if (vm_page_active(m))
+				vm_page_reference(m);
+			else
+				vm_page_deactivate(m);
+		}
+	}
+	vm_page_unlock(m);
+}
+
 /*
  *	vm_page_advise
  *
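
Reviewer note (not part of the patch): a minimal caller-side sketch of the new interface, for context.  Only vm_page_release() itself comes from this change; the consumer function dev_io_done_page(), its 'stale' flag, and the header set below are hypothetical and assume the usual rules for managed pages on HEAD at the time of this review.

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical consumer: the caller holds a wire reference on a managed
 * page whose I/O has completed.  vm_page_release() drops the wiring and
 * either frees the page (when 'stale' is true and freeing is cheap) or
 * requeues it, preserving LRU in the common case.
 */
static void
dev_io_done_page(vm_page_t m, bool stale)
{

	vm_page_release(m, stale);
}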