Index: sys/kern/kern_sendfile.c
===================================================================
--- sys/kern/kern_sendfile.c
+++ sys/kern/kern_sendfile.c
@@ -121,76 +121,22 @@
 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
     NULL, 0, sfstat_sysctl, "I", "sendfile statistics");
 
-/*
- * Detach mapped page and release resources back to the system. Called
- * by mbuf(9) code when last reference to a page is freed.
- */
-static void
-sendfile_free_page(vm_page_t pg, bool nocache)
-{
-	bool freed;
-
-	vm_page_lock(pg);
-	/*
-	 * In either case check for the object going away on us. This can
-	 * happen since we don't hold a reference to it. If so, we're
-	 * responsible for freeing the page. In 'noncache' case try to free
-	 * the page, but only if it is cheap to.
-	 */
-	if (vm_page_unwire_noq(pg)) {
-		vm_object_t obj;
-
-		if ((obj = pg->object) == NULL)
-			vm_page_free(pg);
-		else {
-			freed = false;
-			if (nocache && !vm_page_xbusied(pg) &&
-			    VM_OBJECT_TRYWLOCK(obj)) {
-				/* Only free unmapped pages. */
-				if (obj->ref_count == 0 ||
-				    !pmap_page_is_mapped(pg))
-					/*
-					 * The busy test before the object is
-					 * locked cannot be relied upon.
-					 */
-					freed = vm_page_try_to_free(pg);
-				VM_OBJECT_WUNLOCK(obj);
-			}
-			if (!freed) {
-				/*
-				 * If we were asked to not cache the page, place
-				 * it near the head of the inactive queue so
-				 * that it is reclaimed sooner. Otherwise,
-				 * maintain LRU.
-				 */
-				if (nocache)
-					vm_page_deactivate_noreuse(pg);
-				else if (vm_page_active(pg))
-					vm_page_reference(pg);
-				else
-					vm_page_deactivate(pg);
-			}
-		}
-	}
-	vm_page_unlock(pg);
-}
-
 static void
 sendfile_free_mext(struct mbuf *m)
 {
 	struct sf_buf *sf;
 	vm_page_t pg;
-	bool nocache;
+	int flags;
 
 	KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF,
 	    ("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m));
 
 	sf = m->m_ext.ext_arg1;
 	pg = sf_buf_page(sf);
-	nocache = m->m_ext.ext_flags & EXT_FLAG_NOCACHE;
+	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;
 
 	sf_buf_free(sf);
-	sendfile_free_page(pg, nocache);
+	vm_page_release(pg, flags);
 
 	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
 		struct sendfile_sync *sfs = m->m_ext.ext_arg2;
@@ -208,21 +154,21 @@
 {
 	struct mbuf_ext_pgs *ext_pgs;
 	vm_page_t pg;
-	int i;
-	bool nocache, cache_last;
+	int flags, i;
+	bool cache_last;
 
 	KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_PGS,
 	    ("%s: m %p !M_EXT or !EXT_PGS", __func__, m));
 
-	nocache = m->m_ext.ext_flags & EXT_FLAG_NOCACHE;
 	cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
 	ext_pgs = m->m_ext.ext_pgs;
+	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;
 
 	for (i = 0; i < ext_pgs->npgs; i++) {
 		if (cache_last && i == ext_pgs->npgs - 1)
-			nocache = false;
+			flags = 0;
 		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
-		sendfile_free_page(pg, nocache);
+		vm_page_release(pg, flags);
 	}
 
 	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
Index: sys/kern/vfs_bio.c
===================================================================
--- sys/kern/vfs_bio.c
+++ sys/kern/vfs_bio.c
@@ -2943,47 +2943,6 @@
 	}
 }
 
-/*
- * Unwire a page held by a buf and either free it or update the page queues to
- * reflect its recent use.
- */
-static void
-vfs_vmio_unwire(struct buf *bp, vm_page_t m)
-{
-	bool freed;
-
-	vm_page_lock(m);
-	if (vm_page_unwire_noq(m)) {
-		if ((bp->b_flags & B_DIRECT) != 0)
-			freed = vm_page_try_to_free(m);
-		else
-			freed = false;
-		if (!freed) {
-			/*
-			 * Use a racy check of the valid bits to determine
-			 * whether we can accelerate reclamation of the page.
-			 * The valid bits will be stable unless the page is
-			 * being mapped or is referenced by multiple buffers,
-			 * and in those cases we expect races to be rare. At
-			 * worst we will either accelerate reclamation of a
-			 * valid page and violate LRU, or unnecessarily defer
-			 * reclamation of an invalid page.
-			 *
-			 * The B_NOREUSE flag marks data that is not expected to
-			 * be reused, so accelerate reclamation in that case
-			 * too. Otherwise, maintain LRU.
-			 */
-			if (m->valid == 0 || (bp->b_flags & B_NOREUSE) != 0)
-				vm_page_deactivate_noreuse(m);
-			else if (vm_page_active(m))
-				vm_page_reference(m);
-			else
-				vm_page_deactivate(m);
-		}
-	}
-	vm_page_unlock(m);
-}
-
 /*
  * Perform page invalidation when a buffer is released. The fully invalid
  * pages will be reclaimed later in vfs_vmio_truncate().
@@ -2993,7 +2952,7 @@
 {
 	vm_object_t obj;
 	vm_page_t m;
-	int i, resid, poffset, presid;
+	int flags, i, resid, poffset, presid;
 
 	if (buf_mapped(bp)) {
 		BUF_CHECK_MAPPED(bp);
@@ -3012,6 +2971,7 @@
 	 *
 	 * See man buf(9) for more information
 	 */
+	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
 	obj = bp->b_bufobj->bo_object;
 	resid = bp->b_bufsize;
 	poffset = bp->b_offset & PAGE_MASK;
@@ -3033,7 +2993,7 @@
 		}
 		if (pmap_page_wired_mappings(m) == 0)
 			vm_page_set_invalid(m, poffset, presid);
-		vfs_vmio_unwire(bp, m);
+		vm_page_release_locked(m, flags);
 		resid -= presid;
 		poffset = 0;
 	}
@@ -3049,7 +3009,7 @@
 {
 	vm_object_t obj;
 	vm_page_t m;
-	int i;
+	int flags, i;
 
 	if (bp->b_npages == desiredpages)
 		return;
@@ -3064,14 +3024,22 @@
 	/*
 	 * The object lock is needed only if we will attempt to free pages.
 	 */
-	obj = (bp->b_flags & B_DIRECT) != 0 ? bp->b_bufobj->bo_object : NULL;
-	if (obj != NULL)
+	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
+	if ((bp->b_flags & B_DIRECT) != 0) {
+		flags |= VPR_TRYFREE;
+		obj = bp->b_bufobj->bo_object;
 		VM_OBJECT_WLOCK(obj);
+	} else {
+		obj = NULL;
+	}
 	for (i = desiredpages; i < bp->b_npages; i++) {
 		m = bp->b_pages[i];
 		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
 		bp->b_pages[i] = NULL;
-		vfs_vmio_unwire(bp, m);
+		if (obj != NULL)
+			vm_page_release_locked(m, flags);
+		else
+			vm_page_release(m, flags);
 	}
 	if (obj != NULL)
 		VM_OBJECT_WUNLOCK(obj);
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -562,8 +562,12 @@
 bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 void vm_page_reference(vm_page_t m);
+#define	VPR_TRYFREE	0x01
+#define	VPR_NOREUSE	0x02
+void vm_page_release(vm_page_t m, int flags);
+void vm_page_release_locked(vm_page_t m, int flags);
 bool vm_page_remove(vm_page_t);
-int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
+int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
 vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
     vm_pindex_t pindex);
 void vm_page_requeue(vm_page_t m);
@@ -574,7 +578,6 @@
 int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
 void vm_page_sunbusy(vm_page_t m);
-bool vm_page_try_to_free(vm_page_t m);
 int vm_page_trysbusy(vm_page_t m);
 void vm_page_unhold_pages(vm_page_t *ma, int count);
 void vm_page_unswappable(vm_page_t m);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -168,9 +168,9 @@
     vm_page_t m_run, vm_paddr_t high);
 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
     int req);
-static int vm_page_import(void *arg, void **store, int cnt, int domain,
+static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
     int flags);
-static void vm_page_release(void *arg, void **store, int cnt);
+static void vm_page_zone_release(void *arg, void **store, int cnt);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
 
@@ -210,7 +210,7 @@
 		pgcache->pool = pool;
 		pgcache->zone = uma_zcache_create("vm pgcache",
 		    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
-		    vm_page_import, vm_page_release, pgcache,
+		    vm_page_zone_import, vm_page_zone_release, pgcache,
 		    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
 		(void)uma_zone_set_maxcache(pgcache->zone, 0);
 	}
@@ -2208,7 +2208,7 @@
 }
 
 static int
-vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
+vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
@@ -2231,7 +2231,7 @@
 }
 
 static void
-vm_page_release(void *arg, void **store, int cnt)
+vm_page_zone_release(void *arg, void **store, int cnt)
 {
 	struct vm_domain *vmd;
 	struct vm_pgcache *pgcache;
@@ -3748,28 +3748,91 @@
 }
 
 /*
- * Attempt to free the page. If it cannot be freed, do nothing. Returns true
- * if the page is freed and false otherwise.
- *
- * The page must be managed. The page and its containing object must be
- * locked.
+ * Unwire a page and either attempt to free it or re-add it to the page queues.
  */
-bool
-vm_page_try_to_free(vm_page_t m)
+void
+vm_page_release(vm_page_t m, int flags)
+{
+	vm_object_t object;
+	bool freed;
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("vm_page_release: page %p is unmanaged", m));
+
+	vm_page_lock(m);
+	if (vm_page_unwire_noq(m)) {
+		if ((object = m->object) == NULL) {
+			vm_page_free(m);
+		} else {
+			freed = false;
+			if ((flags & VPR_TRYFREE) != 0 && !vm_page_busied(m) &&
+			    VM_OBJECT_TRYWLOCK(object)) {
+				/*
+				 * Only free unmapped pages. The busy test from
+				 * before the object was locked cannot be relied
+				 * upon.
+				 */
+				if ((object->ref_count == 0 ||
+				    !pmap_page_is_mapped(m)) && m->dirty == 0 &&
+				    !vm_page_busied(m)) {
+					vm_page_free(m);
+					freed = true;
+				}
+				VM_OBJECT_WUNLOCK(object);
+			}
+
+			if (!freed) {
+				/*
+				 * Use a racy check of the valid bits to
+				 * determine whether we should accelerate
+				 * reclamation of the page. At worst we will
+				 * either accelerate reclamation of a valid page
+				 * and violate LRU, or unnecessarily defer
+				 * reclamation of an invalid page.
+				 *
+				 * If we were asked to not cache the page, place
+				 * it near the head of the inactive queue so
+				 * that it is reclaimed sooner.
+				 */
+				if ((flags & (VPR_NOREUSE | VPR_TRYFREE)) != 0 ||
+				    m->valid == 0)
+					vm_page_deactivate_noreuse(m);
+				else if (vm_page_active(m))
+					vm_page_reference(m);
+				else
+					vm_page_deactivate(m);
+			}
+		}
+	}
+	vm_page_unlock(m);
+}
+
+/* See vm_page_release(). */
+void
+vm_page_release_locked(vm_page_t m, int flags)
 {
-	vm_page_assert_locked(m);
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m));
-	if (m->dirty != 0 || vm_page_wired(m) || vm_page_busied(m))
-		return (false);
-	if (m->object->ref_count != 0) {
-		pmap_remove_all(m);
-		if (m->dirty != 0)
-			return (false);
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("vm_page_release_locked: page %p is unmanaged", m));
+
+	vm_page_lock(m);
+	if (vm_page_unwire_noq(m)) {
+		if ((flags & VPR_TRYFREE) != 0 &&
+		    (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
+		    m->dirty == 0 && !vm_page_busied(m)) {
+			vm_page_free(m);
+		} else {
+			if ((flags & (VPR_TRYFREE | VPR_NOREUSE)) != 0 ||
+			    m->valid == 0)
+				vm_page_deactivate_noreuse(m);
+			else if (vm_page_active(m))
+				vm_page_reference(m);
+			else
+				vm_page_deactivate(m);
		}
 	}
-	vm_page_free(m);
-	return (true);
+	vm_page_unlock(m);
 }
 
 /*
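
Usage sketch (not part of the patch): the hunks above convert callers that previously open-coded the unwire-and-requeue logic to the new vm_page_release() interface. The fragment below is a minimal illustration of the intended calling pattern, assuming only the declarations added to vm_page.h; example_read_page() and the exact VM_ALLOC_* flags are hypothetical and not taken from this change.

/*
 * Illustrative sketch only -- not part of this patch.  example_read_page()
 * is a hypothetical caller; the VM_ALLOC_* flags are assumptions.  It wires
 * a page so it can be used after the object lock is dropped, then hands the
 * wiring back through the new interface.
 */
static void
example_read_page(vm_object_t obj, vm_pindex_t pindex, bool nocache)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(obj);
	m = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
	vm_page_xunbusy(m);
	VM_OBJECT_WUNLOCK(obj);

	/* ... access the page without the object lock held ... */

	/*
	 * Drop the wiring.  With VPR_TRYFREE, vm_page_release() frees the
	 * page when that is cheap (clean, unmapped, not busied); otherwise
	 * the page is placed near the head of the inactive queue, as it also
	 * is for VPR_NOREUSE.  With no flags, LRU ordering is maintained.
	 */
	vm_page_release(m, nocache ? VPR_TRYFREE : 0);
}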