Index: sys/fs/tmpfs/tmpfs_subr.c
===================================================================
--- sys/fs/tmpfs/tmpfs_subr.c
+++ sys/fs/tmpfs/tmpfs_subr.c
@@ -361,6 +361,57 @@
 	return (1);
 }
 
+static int
+tmpfs_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
+    int end, boolean_t ignerr)
+{
+	vm_page_t m;
+	int rv;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(base >= 0, ("%s: base %d", __func__, base));
+	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
+	    end));
+
+retry:
+	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
+	if (m != NULL) {
+		MPASS(vm_page_all_valid(m));
+	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
+		m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL |
+		    VM_ALLOC_WAITFAIL);
+		if (m == NULL)
+			goto retry;
+		vm_object_pip_add(object, 1);
+		VM_OBJECT_WUNLOCK(object);
+		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
+		VM_OBJECT_WLOCK(object);
+		vm_object_pip_wakeup(object);
+		if (rv == VM_PAGER_OK) {
+			/*
+			 * Since the page was not resident, and therefore not
+			 * recently accessed, immediately enqueue it for
+			 * asynchronous laundering.  The current operation is
+			 * not regarded as an access.
+			 */
+			vm_page_launder(m);
+		} else {
+			vm_page_free(m);
+			if (ignerr)
+				m = NULL;
+			else
+				return (EIO);
+		}
+	}
+	if (m != NULL) {
+		pmap_zero_page_area(m, base, end - base);
+		vm_page_set_dirty(m);
+		vm_page_xunbusy(m);
+	}
+
+	return (0);
+}
+
 void
 tmpfs_ref_node(struct tmpfs_node *node)
 {
@@ -1662,7 +1713,6 @@
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *node;
 	vm_object_t uobj;
-	vm_page_t m;
 	vm_pindex_t idx, newpages, oldpages;
 	off_t oldsize;
 	int base, rv;
@@ -1702,45 +1752,11 @@
 	base = newsize & PAGE_MASK;
 	if (base != 0) {
 		idx = OFF_TO_IDX(newsize);
-retry:
-		m = vm_page_grab(uobj, idx, VM_ALLOC_NOCREAT);
-		if (m != NULL) {
-			MPASS(vm_page_all_valid(m));
-		} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
-			m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
-			    VM_ALLOC_WAITFAIL);
-			if (m == NULL)
-				goto retry;
-			vm_object_pip_add(uobj, 1);
+		rv = tmpfs_partial_page_invalidate(uobj, idx, base,
+		    PAGE_SIZE, ignerr);
+		if (rv != 0) {
 			VM_OBJECT_WUNLOCK(uobj);
-			rv = vm_pager_get_pages(uobj, &m, 1, NULL,
-			    NULL);
-			VM_OBJECT_WLOCK(uobj);
-			vm_object_pip_wakeup(uobj);
-			if (rv == VM_PAGER_OK) {
-				/*
-				 * Since the page was not resident,
-				 * and therefore not recently
-				 * accessed, immediately enqueue it
-				 * for asynchronous laundering.  The
-				 * current operation is not regarded
-				 * as an access.
-				 */
-				vm_page_launder(m);
-			} else {
-				vm_page_free(m);
-				if (ignerr)
-					m = NULL;
-				else {
-					VM_OBJECT_WUNLOCK(uobj);
-					return (EIO);
-				}
-			}
-		}
-		if (m != NULL) {
-			pmap_zero_page_area(m, base, PAGE_SIZE - base);
-			vm_page_set_dirty(m);
-			vm_page_xunbusy(m);
+			return (rv);
 		}
 	}
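
Note on reuse: the helper deliberately leaves the error-path unlock to the caller, so tmpfs_partial_page_invalidate() returns with the object lock still held on failure, as tmpfs_reg_resize() above shows. A natural second consumer would be a hole-punching path that must zero the partial pages at both edges of a deallocated range. The sketch below is illustrative only and not part of this patch: the name tmpfs_zero_hole_edges() and its contract (object write-locked on entry and on return, start < end, errors never ignored) are assumptions, and it relies on the same headers already included by tmpfs_subr.c.

	/*
	 * Hypothetical caller: zero the partial pages at the edges of a
	 * hole [start, end) in the node's anonymous object.  Assumes
	 * start < end and that the caller holds the object write lock;
	 * the lock is still held on return, including on error.
	 */
	static int
	tmpfs_zero_hole_edges(vm_object_t uobj, off_t start, off_t end)
	{
		int rv;

		VM_OBJECT_ASSERT_WLOCKED(uobj);
		if (OFF_TO_IDX(start) == OFF_TO_IDX(end)) {
			/* Both edges lie in one page; zero [start, end). */
			return (tmpfs_partial_page_invalidate(uobj,
			    OFF_TO_IDX(start), start & PAGE_MASK,
			    end & PAGE_MASK, FALSE));
		}
		if ((start & PAGE_MASK) != 0) {
			/* Zero from 'start' to the end of its page. */
			rv = tmpfs_partial_page_invalidate(uobj,
			    OFF_TO_IDX(start), start & PAGE_MASK, PAGE_SIZE,
			    FALSE);
			if (rv != 0)
				return (rv);
		}
		if ((end & PAGE_MASK) != 0) {
			/* Zero from the start of the last page up to 'end'. */
			rv = tmpfs_partial_page_invalidate(uobj,
			    OFF_TO_IDX(end), 0, end & PAGE_MASK, FALSE);
			if (rv != 0)
				return (rv);
		}
		return (0);
	}

Because unlocking stays with the caller, a multi-call sequence like this keeps the object lock across both edge pages instead of dropping and retaking it between calls.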