Index: sys/ufs/ffs/ffs_vnops.c
===================================================================
--- sys/ufs/ffs/ffs_vnops.c
+++ sys/ufs/ffs/ffs_vnops.c
@@ -77,6 +77,7 @@
 #include
 #include
 #include
+#include <sys/sysctl.h>
 #include
 #include
 
@@ -86,6 +87,7 @@
 #include
 #include
 #include
+#include <vm/vm_pager.h>
 #include
 #include
 
@@ -118,14 +120,14 @@
 static vop_openextattr_t	ffs_openextattr;
 static vop_setextattr_t		ffs_setextattr;
 static vop_vptofh_t		ffs_vptofh;
-
+static vop_getpages_t		ffs_getpages;
 
 /* Global vfs data structures for ufs. */
 struct vop_vector ffs_vnodeops1 = {
 	.vop_default =		&ufs_vnodeops,
 	.vop_fsync =		ffs_fsync,
 	.vop_fdatasync =	ffs_fdatasync,
-	.vop_getpages =		vnode_pager_local_getpages,
+	.vop_getpages =		ffs_getpages,
 	.vop_getpages_async =	vnode_pager_local_getpages_async,
 	.vop_lock1 =		ffs_lock,
 	.vop_read =		ffs_read,
@@ -147,7 +149,7 @@
 	.vop_default =		&ufs_vnodeops,
 	.vop_fsync =		ffs_fsync,
 	.vop_fdatasync =	ffs_fdatasync,
-	.vop_getpages =		vnode_pager_local_getpages,
+	.vop_getpages =		ffs_getpages,
 	.vop_getpages_async =	vnode_pager_local_getpages_async,
 	.vop_lock1 =		ffs_lock,
 	.vop_read =		ffs_read,
@@ -1784,3 +1786,137 @@
 	ufhp->ufid_gen = ip->i_gen;
 	return (0);
 }
+
+SYSCTL_DECL(_vfs_ffs);
+static int use_buf_pager;
+SYSCTL_INT(_vfs_ffs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0,
+    "Always use buffer pager instead of bmap");
+
+static int
+ffs_getpages(struct vop_getpages_args *ap)
+{
+	struct vnode *vp;
+	vm_page_t *mm, m;
+	vm_object_t object;
+	struct buf *bp;
+	struct ufsmount *um;
+	vm_pindex_t pi;
+	ufs_lbn_t lbn, lbnp;
+	vm_ooffset_t la, lb;
+	long bsize;
+	int bo_bs, count, error, i;
+	bool redo, lpart;
+
+	vp = ap->a_vp;
+	mm = ap->a_m;
+	count = ap->a_count;
+
+	um = VFSTOUFS(ap->a_vp->v_mount);
+	bo_bs = um->um_devvp->v_bufobj.bo_bsize;
+	if (!use_buf_pager && bo_bs <= PAGE_SIZE)
+		return (vnode_pager_generic_getpages(vp, mm, count,
+		    ap->a_rbehind, ap->a_rahead, NULL, NULL));
+
+	object = vp->v_object;
+	la = IDX_TO_OFF(mm[count - 1]->pindex);
+	if (la >= object->un_pager.vnp.vnp_size)
+		return (VM_PAGER_BAD);
+	lpart = la + PAGE_SIZE > object->un_pager.vnp.vnp_size;
+	if (ap->a_rbehind != NULL) {
+		lb = IDX_TO_OFF(mm[0]->pindex);
+		*ap->a_rbehind = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
+	}
+	if (ap->a_rahead != NULL) {
+		*ap->a_rahead = OFF_TO_IDX(roundup2(la, bo_bs) - la);
+		if (la + IDX_TO_OFF(*ap->a_rahead) >=
+		    object->un_pager.vnp.vnp_size) {
+			*ap->a_rahead = OFF_TO_IDX(roundup2(object->un_pager.
+			    vnp.vnp_size, PAGE_SIZE) - la);
+		}
+	}
+	VM_OBJECT_WLOCK(object);
+again:
+	for (i = 0; i < count; i++)
+		vm_page_busy_downgrade(mm[i]);
+	VM_OBJECT_WUNLOCK(object);
+
+	lbnp = -1;
+	error = 0;
+	for (i = 0; i < count; i++) {
+		m = mm[i];
+		if (m->valid == VM_PAGE_BITS_ALL)
+			continue;
+		lbn = lblkno(um->um_fs, IDX_TO_OFF(m->pindex));
+		if (lbn != lbnp) {
+			bsize = blksize(um->um_fs, VTOI(vp), lbn);
+			error = bread_gb(vp, lbn, bsize, NOCRED, GB_UNMAPPED,
+			    &bp);
+			if (error != 0)
+				break;
+			KASSERT(m->valid == VM_PAGE_BITS_ALL || i == count - 1,
+			    ("buf %d %p invalid", i, m));
+			if (i == count - 1 && lpart) {
+				VM_OBJECT_WLOCK(object);
+				if (m->valid != 0 &&
+				    m->valid != VM_PAGE_BITS_ALL)
+					vm_page_zero_invalid(m, TRUE);
+				VM_OBJECT_WUNLOCK(object);
+			}
+			if (LIST_EMPTY(&bp->b_dep)) {
+				bp->b_flags |= B_RELBUF;
+				bp->b_flags &= ~B_NOCACHE;
+				brelse(bp);
+			} else {
+				/* XXX */
+				bqrelse(bp);
+			}
+			lbnp = lbn;
+		}
+	}
+
+	VM_OBJECT_WLOCK(object);
+	redo = false;
+	for (i = 0; i < count; i++) {
+		m = mm[i];
+		vm_page_sunbusy(m);
+		while (vm_page_busied(m)) {
+			pi = m->pindex;
+			vm_page_reference(m);
+			vm_page_lock(m);
+			VM_OBJECT_WUNLOCK(object);
+			vm_page_busy_sleep(m, "ffspgl", false);
+			VM_OBJECT_WLOCK(object);
+			m = vm_page_lookup(object, pi);
+			while (m == NULL) {
+				m = vm_page_alloc(object, pi, VM_ALLOC_NORMAL);
+				if (m != NULL) {
+					mm[i] = m;
+					redo = true;
+					goto next_i;
+				}
+				VM_WAIT;
+			}
+			if (m != mm[i])
+				mm[i] = m;
+		}
+		vm_page_xbusy(m);
+		/*
+		 * Since the pages were only sbusy while neither the
+		 * buffer nor the object locks were held by us, they
+		 * could have been invalidated.  Recheck the valid
+		 * bits and re-read as needed.
+		 *
+		 * Note that the last page is made fully valid in the
+		 * read loop, and partial validity for the count - 1
+		 * index could mean that the page was invalidated or
+		 * removed, so it must also cause a restart for safety.
+		 */
+		if (m->valid != VM_PAGE_BITS_ALL)
+			redo = true;
+next_i:;
+	}
+	if (redo && error == 0)
+		goto again;
+	VM_OBJECT_WUNLOCK(object);
+	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
+}
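
Testing note (not part of the diff): the buffer-cache pager is engaged
automatically when the device bufobj block size (bo_bsize) exceeds
PAGE_SIZE; on ordinary configurations it can be forced with
"sysctl vfs.ffs.use_buf_pager=1".  Because the knob is declared
CTLFLAG_RWTUN, it can also be preset as a tunable, e.g.
vfs.ffs.use_buf_pager=1 in /boot/loader.conf.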