diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompat.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompat.h
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompat.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/ccompat.h
@@ -50,10 +50,12 @@
 #define VFS_VOP_VECTOR_REGISTER(x)
 #endif
 
-#if __FreeBSD_version >= 1300076
-#define getnewvnode_reserve_() getnewvnode_reserve()
+#if __FreeBSD_version >= 1400006
+#define getnewvnode_reserve_(mp) getnewvnode_reserve(mp)
+#elif __FreeBSD_version >= 1300076
+#define getnewvnode_reserve_(mp) getnewvnode_reserve()
 #else
-#define getnewvnode_reserve_() getnewvnode_reserve(1)
+#define getnewvnode_reserve_(mp) getnewvnode_reserve(1)
 #endif
 
 #if __FreeBSD_version < 1300102
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
@@ -818,7 +818,7 @@
        return (SET_ERROR(EDQUOT));
    }
 
-   getnewvnode_reserve_();
+   getnewvnode_reserve_(zfsvfs->z_vfs);
 
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -1116,7 +1116,7 @@
        goto out;
    }
 
-   getnewvnode_reserve_();
+   getnewvnode_reserve_(zfsvfs->z_vfs);
 
    tx = dmu_tx_create(os);
 
@@ -1463,7 +1463,7 @@
    /*
     * Add a new entry to the directory.
     */
-   getnewvnode_reserve_();
+   getnewvnode_reserve_(zfsvfs->z_vfs);
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
    dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
@@ -3554,7 +3554,7 @@
        return (SET_ERROR(EDQUOT));
    }
 
-   getnewvnode_reserve_();
+   getnewvnode_reserve_(zfsvfs->z_vfs);
    tx = dmu_tx_create(zfsvfs->z_os);
    fuid_dirtied = zfsvfs->z_fuid_dirty;
    dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
@@ -4322,7 +4322,7 @@
 {
 
    if (ap->a_bop != NULL)
-       *ap->a_bop = &ap->a_vp->v_bufobj;
+       *ap->a_bop = NULL;
    if (ap->a_bnp != NULL)
        *ap->a_bnp = ap->a_bn;
    if (ap->a_runp != NULL)
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
@@ -920,7 +920,7 @@
    int err;
 
    td = curthread;
-   getnewvnode_reserve_();
+   getnewvnode_reserve_(zfsvfs->z_vfs);
again:
    *zpp = NULL;
    ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
diff --git a/sys/fs/cd9660/cd9660_bmap.c b/sys/fs/cd9660/cd9660_bmap.c
--- a/sys/fs/cd9660/cd9660_bmap.c
+++ b/sys/fs/cd9660/cd9660_bmap.c
@@ -73,7 +73,7 @@
     * to physical mapping is requested.
     */
    if (ap->a_bop != NULL)
-       *ap->a_bop = &ip->i_mnt->im_devvp->v_bufobj;
+       *ap->a_bop = vp2bo(ip->i_mnt->im_devvp);
    if (ap->a_bnp == NULL)
        return (0);
 
diff --git a/sys/fs/cd9660/cd9660_vfsops.c b/sys/fs/cd9660/cd9660_vfsops.c
--- a/sys/fs/cd9660/cd9660_vfsops.c
+++ b/sys/fs/cd9660/cd9660_vfsops.c
@@ -241,7 +241,7 @@
    if (mp->mnt_iosize_max > maxphys)
        mp->mnt_iosize_max = maxphys;
 
-   bo = &devvp->v_bufobj;
+   bo = vp2bo(devvp);
 
    /* This is the "logical sector size".  The standard says this
     * should be 2048 or the physical sector size on the device,
@@ -383,7 +383,8 @@
    if (isverified)
        mp->mnt_flag |= MNT_VERIFIED;
    mp->mnt_flag |= MNT_LOCAL;
-   mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
+   mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
+       MNTK_USES_BCACHE;
    MNT_IUNLOCK(mp);
    isomp->im_mountp = mp;
    isomp->im_dev = dev;
diff --git a/sys/fs/devfs/devfs_vfsops.c b/sys/fs/devfs/devfs_vfsops.c
--- a/sys/fs/devfs/devfs_vfsops.c
+++ b/sys/fs/devfs/devfs_vfsops.c
@@ -131,7 +131,7 @@
    MNT_ILOCK(mp);
    mp->mnt_flag |= MNT_LOCAL;
    mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
-       MNTK_NOMSYNC;
+       MNTK_NOMSYNC | MNTK_USES_BCACHE;
 #ifdef MAC
    mp->mnt_flag |= MNT_MULTILABEL;
 #endif
diff --git a/sys/fs/ext2fs/ext2_bmap.c b/sys/fs/ext2fs/ext2_bmap.c
--- a/sys/fs/ext2fs/ext2_bmap.c
+++ b/sys/fs/ext2fs/ext2_bmap.c
@@ -72,7 +72,7 @@
     * to physical mapping is requested.
     */
    if (ap->a_bop != NULL)
-       *ap->a_bop = &VTOI(ap->a_vp)->i_devvp->v_bufobj;
+       *ap->a_bop = vp2bo(VTOI(ap->a_vp)->i_devvp);
    if (ap->a_bnp == NULL)
        return (0);
 
@@ -275,7 +275,8 @@
        */
        metalbn = ap->in_lbn;
 
-       if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
+       if ((daddr == 0 && !incore(vp2bo(vp), metalbn)) ||
+           metalbn == bn)
            break;
        /*
         * If we get here, we've either got the block in the cache
diff --git a/sys/fs/ext2fs/ext2_inode.c b/sys/fs/ext2fs/ext2_inode.c
--- a/sys/fs/ext2fs/ext2_inode.c
+++ b/sys/fs/ext2fs/ext2_inode.c
@@ -246,7 +246,7 @@
    oip = VTOI(ovp);
 
 #ifdef INVARIANTS
-   bo = &ovp->v_bufobj;
+   bo = vp2bo(ovp);
 #endif
    fs = oip->i_e2fs;
 
diff --git a/sys/fs/ext2fs/ext2_vfsops.c b/sys/fs/ext2fs/ext2_vfsops.c
--- a/sys/fs/ext2fs/ext2_vfsops.c
+++ b/sys/fs/ext2fs/ext2_vfsops.c
@@ -871,7 +871,7 @@
        return (EINVAL);
    }
 
-   bo = &devvp->v_bufobj;
+   bo = vp2bo(devvp);
    bo->bo_private = cp;
    bo->bo_ops = g_vfs_bufops;
    if (devvp->v_rdev->si_iosize_max != 0)
@@ -964,7 +964,7 @@
    ump->um_mountp = mp;
    ump->um_dev = dev;
    ump->um_devvp = devvp;
-   ump->um_bo = &devvp->v_bufobj;
+   ump->um_bo = vp2bo(devvp);
    ump->um_cp = cp;
 
    /*
@@ -1151,7 +1151,7 @@
        ip = VTOI(vp);
        if ((ip->i_flag &
            (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
-           (vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
+           (vp2bo(vp)->bo_dirty.bv_cnt == 0 ||
            waitfor == MNT_LAZY)) {
            VI_UNLOCK(vp);
            continue;
diff --git a/sys/fs/fuse/fuse_io.c b/sys/fs/fuse/fuse_io.c
--- a/sys/fs/fuse/fuse_io.c
+++ b/sys/fs/fuse/fuse_io.c
@@ -1101,10 +1101,10 @@
    }
    fvdat->flag |= FN_FLUSHINPROG;
 
-   if (vp->v_bufobj.bo_object != NULL) {
-       VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
-       vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
-       VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
+   if (vp->v_object != NULL) {
+       VM_OBJECT_WLOCK(vp->v_object);
+       vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
+       VM_OBJECT_WUNLOCK(vp->v_object);
    }
    error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (error) {
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -536,7 +536,7 @@
        data->max_readahead_blocks);
 
    if (bo != NULL)
-       *bo = &vp->v_bufobj;
+       *bo = vp2bo(vp);
 
    /*
     * The FUSE_BMAP operation does not include the runp and runb
diff --git a/sys/fs/msdosfs/msdosfs_vfsops.c b/sys/fs/msdosfs/msdosfs_vfsops.c
--- a/sys/fs/msdosfs/msdosfs_vfsops.c
+++ b/sys/fs/msdosfs/msdosfs_vfsops.c
@@ -433,7 +433,7 @@
        return (error);
    }
    dev_ref(dev);
-   bo = &devvp->v_bufobj;
+   bo = vp2bo(devvp);
    VOP_UNLOCK(devvp);
    if (dev->si_iosize_max != 0)
        mp->mnt_iosize_max = dev->si_iosize_max;
@@ -937,7 +937,7 @@
        dep = VTODE(vp);
        if ((dep->de_flag &
            (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
-           (vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
+           (vp2bo(vp)->bo_dirty.bv_cnt == 0 ||
            waitfor == MNT_LAZY)) {
            VI_UNLOCK(vp);
            continue;
diff --git a/sys/fs/msdosfs/msdosfs_vnops.c b/sys/fs/msdosfs/msdosfs_vnops.c
--- a/sys/fs/msdosfs/msdosfs_vnops.c
+++ b/sys/fs/msdosfs/msdosfs_vnops.c
@@ -1758,7 +1758,7 @@
    dep = VTODE(vp);
    pmp = dep->de_pmp;
    if (ap->a_bop != NULL)
-       *ap->a_bop = &pmp->pm_devvp->v_bufobj;
+       *ap->a_bop = vp2bo(pmp->pm_devvp);
    if (ap->a_bnp == NULL)
        return (0);
    if (ap->a_runp != NULL)
diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -90,7 +90,7 @@
 ncl_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
 {
 
-   return (off / vp->v_bufobj.bo_bsize);
+   return (off / vp2bo(vp)->bo_bsize);
 }
 
 static int
@@ -105,7 +105,7 @@
    nsize = np->n_size;
    NFSUNLOCKNODE(np);
 
-   biosize = vp->v_bufobj.bo_bsize;
+   biosize = vp2bo(vp)->bo_bsize;
    bcount = biosize;
    if ((off_t)lbn * biosize >= nsize)
        bcount = 0;
@@ -463,7 +463,7 @@
    n = 0;
    on = 0;
 
-   biosize = vp->v_bufobj.bo_bsize;
+   biosize = vp2bo(vp)->bo_bsize;
    seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
 
    error = nfs_bioread_check_cons(vp, td, cred);
@@ -491,7 +491,7 @@
        for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
            (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
            rabn = lbn + 1 + nra;
-           if (incore(&vp->v_bufobj, rabn) == NULL) {
+           if (incore(vp2bo(vp), rabn) == NULL) {
                rabp = nfs_getcacheblk(vp, rabn, biosize, td);
                if (!rabp) {
                    error = newnfs_sigintr(nmp, td);
@@ -678,7 +678,7 @@
            (bp->b_flags & B_INVAL) == 0 &&
            (np->n_direofoffset == 0 ||
            (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
-           incore(&vp->v_bufobj, lbn + 1) == NULL) {
+           incore(vp2bo(vp), lbn + 1) == NULL) {
            NFSUNLOCKNODE(np);
            rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
            if (rabp) {
@@ -996,7 +996,7 @@
        return (EFBIG);
    save2 = curthread_pflags2_set(TDP2_SBPAGES);
 
-   biosize = vp->v_bufobj.bo_bsize;
+   biosize = vp2bo(vp)->bo_bsize;
    /*
    * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
    * would exceed the local maximum per-file write commit size when
@@ -1013,15 +1013,15 @@
        nflag = np->n_flag;
        NFSUNLOCKNODE(np);
        if (nflag & NMODIFIED) {
-           BO_LOCK(&vp->v_bufobj);
-           if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
-               TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
+           BO_LOCK(vp2bo(vp));
+           if (vp2bo(vp)->bo_dirty.bv_cnt != 0) {
+               TAILQ_FOREACH(bp, &vp2bo(vp)->bo_dirty.bv_hd,
                    b_bobufs) {
                    if (bp->b_flags & B_NEEDCOMMIT)
                        wouldcommit += bp->b_bcount;
                }
            }
-           BO_UNLOCK(&vp->v_bufobj);
+           BO_UNLOCK(vp2bo(vp));
        }
    }
 
@@ -1340,7 +1340,7 @@
    }
 
    if (vp->v_type == VREG)
-       bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
+       bp->b_blkno = bn * (vp2bo(vp)->bo_bsize / DEV_BSIZE);
    return (bp);
 }
 
@@ -1378,10 +1378,10 @@
    * Now, flush as required.
    */
    if ((flags & (V_SAVE | V_VMIO)) == V_SAVE &&
-       vp->v_bufobj.bo_object != NULL) {
-       VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
-       vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
-       VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
+       vp->v_object != NULL) {
+       VM_OBJECT_WLOCK(vp->v_object);
+       vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
+       VM_OBJECT_WUNLOCK(vp->v_object);
        /*
        * If the page clean was interrupted, fail the invalidation.
        * Not doing so, we run the risk of losing dirty pages in the
@@ -1870,7 +1870,7 @@
 {
    struct nfsnode *np = VTONFS(vp);
    u_quad_t tsize;
-   int biosize = vp->v_bufobj.bo_bsize;
+   int biosize = vp2bo(vp)->bo_bsize;
    int error = 0;
 
    NFSLOCKNODE(np);
diff --git a/sys/fs/nfsclient/nfs_clnode.c b/sys/fs/nfsclient/nfs_clnode.c
--- a/sys/fs/nfsclient/nfs_clnode.c
+++ b/sys/fs/nfsclient/nfs_clnode.c
@@ -132,8 +132,8 @@
        return (error);
    }
    vp = nvp;
-   KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
-   vp->v_bufobj.bo_ops = &buf_ops_newnfs;
+   KASSERT(vp2bo(vp)->bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
+   vp2bo(vp)->bo_ops = &buf_ops_newnfs;
    vp->v_data = np;
    np->n_vnode = vp;
    /*
diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c
--- a/sys/fs/nfsclient/nfs_clport.c
+++ b/sys/fs/nfsclient/nfs_clport.c
@@ -223,8 +223,8 @@
        return (error);
    }
    vp = nvp;
-   KASSERT(vp->v_bufobj.bo_bsize != 0, ("nfscl_nget: bo_bsize == 0"));
-   vp->v_bufobj.bo_ops = &buf_ops_newnfs;
+   KASSERT(vp2bo(vp)->bo_bsize != 0, ("nfscl_nget: bo_bsize == 0"));
+   vp2bo(vp)->bo_ops = &buf_ops_newnfs;
    vp->v_data = np;
    np->n_vnode = vp;
    /*
diff --git a/sys/fs/nfsclient/nfs_clsubs.c b/sys/fs/nfsclient/nfs_clsubs.c
--- a/sys/fs/nfsclient/nfs_clsubs.c
+++ b/sys/fs/nfsclient/nfs_clsubs.c
@@ -356,7 +356,7 @@
    struct bufobj *bo;
 
    MNT_VNODE_FOREACH_ALL(vp, mp, nvp) {
-       bo = &vp->v_bufobj;
+       bo = vp2bo(vp);
        vholdl(vp);
        VI_UNLOCK(vp);
        BO_LOCK(bo);
diff --git a/sys/fs/nfsclient/nfs_clvfsops.c b/sys/fs/nfsclient/nfs_clvfsops.c
--- a/sys/fs/nfsclient/nfs_clvfsops.c
+++ b/sys/fs/nfsclient/nfs_clvfsops.c
@@ -1500,6 +1500,9 @@
        nmp->nm_vinvalbuf = ncl_vinvalbuf;
    }
    vfs_getnewfsid(mp);
+   MNT_ILOCK(mp);
+   mp->mnt_kern_flag |= MNTK_USES_BCACHE;
+   MNT_IUNLOCK(mp);
    nmp->nm_mountp = mp;
    mtx_init(&nmp->nm_mtx, "NFSmount lock", NULL, MTX_DEF | MTX_DUPOK);
 
@@ -1866,7 +1869,7 @@
loop:
    MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
        /* XXX Racy bv_cnt check. */
-       if (NFSVOPISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
+       if (NFSVOPISLOCKED(vp) || vp2bo(vp)->bo_dirty.bv_cnt == 0 ||
            waitfor == MNT_LAZY) {
            VI_UNLOCK(vp);
            continue;
diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c
--- a/sys/fs/nfsclient/nfs_clvnops.c
+++ b/sys/fs/nfsclient/nfs_clvnops.c
@@ -2775,8 +2775,7 @@
        ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
 
    if (vp->v_type == VREG && bp->b_blkno == bp->b_lblkno)
-       bp->b_blkno = bp->b_lblkno * (vp->v_bufobj.bo_bsize /
-           DEV_BSIZE);
+       bp->b_blkno = bp->b_lblkno * (vp2bo(vp)->bo_bsize / DEV_BSIZE);
    if (bp->b_iocmd == BIO_READ)
        cr = bp->b_rcred;
    else
@@ -2848,7 +2847,7 @@
        slpflag = PCATCH;
    if (!commit)
        passone = 0;
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    /*
    * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
    * server, but has not been committed to stable storage on the server
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -1676,7 +1676,7 @@
        VM_OBJECT_WUNLOCK(vp->v_object);
    }
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
    while (cnt > 0) {
        struct buf *bp;
@@ -1691,7 +1691,7 @@
        * should not be set if B_INVAL is set there could be
        * a race here since we haven't locked the buffer).
        */
-       if ((bp = gbincore(&vp->v_bufobj, lblkno)) != NULL) {
+       if ((bp = gbincore(bo, lblkno)) != NULL) {
            if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
                LK_INTERLOCK, BO_LOCKPTR(bo)) == ENOLCK) {
                BO_LOCK(bo);
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -197,7 +197,7 @@
    }
    mp->mnt_kern_flag |= MNTK_LOOKUP_EXCL_DOTDOT | MNTK_NOMSYNC;
    mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
-       (MNTK_USES_BCACHE | MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS);
+       (MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS);
    MNT_IUNLOCK(mp);
    vfs_getnewfsid(mp);
    if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
diff --git a/sys/fs/smbfs/smbfs_io.c b/sys/fs/smbfs/smbfs_io.c
--- a/sys/fs/smbfs/smbfs_io.c
+++ b/sys/fs/smbfs/smbfs_io.c
@@ -650,10 +650,10 @@
    }
    np->n_flag |= NFLUSHINPROG;
 
-   if (vp->v_bufobj.bo_object != NULL) {
-       VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
-       vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
-       VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
+   if (vp->v_object != NULL) {
+       VM_OBJECT_WLOCK(vp->v_object);
+       vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
+       VM_OBJECT_WUNLOCK(vp->v_object);
    }
    error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
 
diff --git a/sys/fs/udf/udf_vfsops.c b/sys/fs/udf/udf_vfsops.c
--- a/sys/fs/udf/udf_vfsops.c
+++ b/sys/fs/udf/udf_vfsops.c
@@ -334,7 +334,7 @@
    if (error)
        goto bail;
 
-   bo = &devvp->v_bufobj;
+   bo = vp2bo(devvp);
    if (devvp->v_rdev->si_iosize_max != 0)
        mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
 
@@ -355,7 +355,8 @@
    mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
    MNT_ILOCK(mp);
    mp->mnt_flag |= MNT_LOCAL;
-   mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
+   mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
+       MNTK_USES_BCACHE;
    MNT_IUNLOCK(mp);
    udfmp->im_mountp = mp;
    udfmp->im_dev = dev;
diff --git a/sys/fs/udf/udf_vnops.c b/sys/fs/udf/udf_vnops.c
--- a/sys/fs/udf/udf_vnops.c
+++ b/sys/fs/udf/udf_vnops.c
@@ -1065,7 +1065,7 @@
    node = VTON(a->a_vp);
 
    if (a->a_bop != NULL)
-       *a->a_bop = &node->udfmp->im_devvp->v_bufobj;
+       *a->a_bop = vp2bo(node->udfmp->im_devvp);
    if (a->a_bnp == NULL)
        return (0);
    if (a->a_runb)
diff --git a/sys/geom/geom_vfs.c b/sys/geom/geom_vfs.c
--- a/sys/geom/geom_vfs.c
+++ b/sys/geom/geom_vfs.c
@@ -247,7 +247,7 @@
    g_topology_assert();
 
    *cpp = NULL;
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    if (bo->bo_private != vp)
        return (EBUSY);
 
@@ -291,7 +291,8 @@
    gp = cp->geom;
    sc = gp->softc;
 
-   bufobj_invalbuf(sc->sc_bo, V_SAVE, 0, 0);
+   bufobj_invalbuf(bo2vnode(sc->sc_bo)->v_object, sc->sc_bo, V_SAVE,
+       0, 0);
    sc->sc_bo->bo_private = cp->private;
    gp->softc = NULL;
    mtx_destroy(&sc->sc_mtx);
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -1234,7 +1234,7 @@
    vp = fp->f_vnode;
    if (vp->v_type != VCHR)
        return (-1);
-   if (vp->v_bufobj.bo_bsize == 0)
+   if (vp2bo(vp)->bo_bsize == 0)
        return (-1);
 
    bio_cmd = (opcode & LIO_WRITE) ? BIO_WRITE : BIO_READ;
@@ -1242,7 +1242,7 @@
    if (iovcnt > max_buf_aio)
        return (-1);
    for (i = 0; i < iovcnt; i++) {
-       if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0)
+       if (job->uiop->uio_iov[i].iov_len % vp2bo(vp)->bo_bsize != 0)
            return (-1);
        if (job->uiop->uio_iov[i].iov_len > maxphys) {
            error = -1;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2890,12 +2890,12 @@
    int i, iosize, resid;
    bool bogus;
 
-   obj = bp->b_bufobj->bo_object;
+   vp = bp->b_vp;
+   obj = vp->v_object;
    KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages,
        ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
        blockcount_read(&obj->paging_in_progress), bp->b_npages));
 
-   vp = bp->b_vp;
    VNPASS(vp->v_holdcnt > 0, vp);
    VNPASS(vp->v_object != NULL, vp);
 
@@ -2976,7 +2976,7 @@
    * See man buf(9) for more information
    */
    flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
-   obj = bp->b_bufobj->bo_object;
+   obj = bp->b_vp->v_object;
    resid = bp->b_bufsize;
    poffset = bp->b_offset & PAGE_MASK;
    VM_OBJECT_WLOCK(obj);
@@ -3027,7 +3027,7 @@
    flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
    if ((bp->b_flags & B_DIRECT) != 0) {
        flags |= VPR_TRYFREE;
-       obj = bp->b_bufobj->bo_object;
+       obj = bp->b_vp->v_object;
        VM_OBJECT_WLOCK(obj);
    } else {
        obj = NULL;
@@ -3066,7 +3066,7 @@
    * them if necessary.  We must clear B_CACHE if these pages
    * are not valid for the range covered by the buffer.
    */
-   obj = bp->b_bufobj->bo_object;
+   obj = bp->b_vp->v_object;
    if (bp->b_npages < desiredpages) {
        KASSERT(desiredpages <= atop(maxbcachebuf),
            ("vfs_vmio_extend past maxbcachebuf %p %d %u",
@@ -3140,7 +3140,7 @@
    match = 0;
 
    /* If the buf isn't in core skip it */
-   if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
+   if ((bpa = gbincore(vp2bo(vp), lblkno)) == NULL)
        return (0);
 
    /* If the buf is busy we don't want to wait for it */
@@ -3188,7 +3188,7 @@
    int maxcl;
    int gbflags;
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
    /*
    * right now we support clustered writing only to regular files.  If
@@ -3296,7 +3296,7 @@
    if (vp == NULL)
        bd = &bdomain[0];
    else
-       bd = &bdomain[vp->v_bufobj.bo_domain];
+       bd = &bdomain[vp2bo(vp)->bo_domain];
 
    counter_u64_add(getnewbufcalls, 1);
    reserved = false;
@@ -3631,7 +3631,7 @@
 
    ASSERT_VOP_LOCKED(vp, "inmem");
 
-   if (incore(&vp->v_bufobj, blkno))
+   if (incore(vp2bo(vp), blkno))
        return (true);
    if (vp->v_mount == NULL)
        return (false);
@@ -3899,7 +3899,7 @@
    if (!unmapped_buf_allowed)
        flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    d_blkno = dblkno;
 
    /* Attempt lockless lookup first. */
@@ -4148,14 +4148,8 @@
 
    if (vmio) {
        bp->b_flags |= B_VMIO;
-       KASSERT(vp->v_object == bp->b_bufobj->bo_object,
-           ("ARGH! different b_bufobj->bo_object %p %p %p\n",
-           bp, vp->v_object, bp->b_bufobj->bo_object));
    } else {
        bp->b_flags &= ~B_VMIO;
-       KASSERT(bp->b_bufobj->bo_object == NULL,
-           ("ARGH! has b_bufobj->bo_object %p %p\n",
-           bp, bp->b_bufobj->bo_object));
        BUF_CHECK_MAPPED(bp);
    }
 
@@ -4538,7 +4532,7 @@
    if (!(bp->b_flags & B_VMIO))
        return;
 
-   obj = bp->b_bufobj->bo_object;
+   obj = bp->b_vp->v_object;
    for (i = 0; i < bp->b_npages; i++) {
        m = bp->b_pages[i];
        if (m == bogus_page) {
@@ -4669,7 +4663,7 @@
    if (!(bp->b_flags & B_VMIO))
        return;
 
-   obj = bp->b_bufobj->bo_object;
+   obj = bp->b_vp->v_object;
    foff = bp->b_offset;
    KASSERT(bp->b_offset != NOOFFSET,
        ("vfs_busy_pages: no buffer offset"));
@@ -5494,12 +5488,12 @@
    }
    vp = (struct vnode *)addr;
    db_printf("Clean buffers:\n");
-   TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
+   TAILQ_FOREACH(bp, &vp2bo(vp)->bo_clean.bv_hd, b_bobufs) {
        db_show_buffer((uintptr_t)bp, 1, 0, NULL);
        db_printf("\n");
    }
    db_printf("Dirty buffers:\n");
-   TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
+   TAILQ_FOREACH(bp, &vp2bo(vp)->bo_dirty.bv_hd, b_bobufs) {
        db_show_buffer((uintptr_t)bp, 1, 0, NULL);
        db_printf("\n");
    }
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -105,7 +105,7 @@
    error = 0;
    td = curthread;
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    if (!unmapped_buf_allowed)
        gbflags &= ~GB_UNMAPPED;
 
@@ -150,7 +150,7 @@
            * Stop if the buffer does not exist or it
            * is invalid (about to go away?)
            */
-           rbp = gbincore(&vp->v_bufobj, lblkno+i);
+           rbp = gbincore(vp2bo(vp), lblkno + i);
            if (rbp == NULL || (rbp->b_flags & B_INVAL))
                break;
 
@@ -410,7 +410,7 @@
    inc = btodb(size);
    for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
        if (i == 0) {
-           vm_object_pip_add(tbp->b_bufobj->bo_object,
+           vm_object_pip_add(tbp->b_vp->v_object,
                tbp->b_npages);
            vfs_busy_pages_acquire(tbp);
        } else {
@@ -461,13 +461,13 @@
                    vm_page_sunbusy(tbp->b_pages[j]);
                    break;
                }
-               vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
+               vm_object_pip_add(tbp->b_vp->v_object, 1);
                off += tinc;
                tsize -= tinc;
            }
            if (tsize > 0) {
clean_sbusy:
-               vm_object_pip_wakeupn(tbp->b_bufobj->bo_object,
+               vm_object_pip_wakeupn(tbp->b_vp->v_object,
                    j);
                for (k = 0; k < j; k++)
                    vm_page_sunbusy(tbp->b_pages[k]);
@@ -815,7 +815,7 @@
    if (!unmapped_buf_allowed)
        gbflags &= ~GB_UNMAPPED;
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    while (len > 0) {
        /*
        * If the buffer is not delayed-write (i.e. dirty), or it
@@ -823,7 +823,7 @@
        * partake in the clustered write.
        */
        BO_LOCK(bo);
-       if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
+       if ((tbp = gbincore(bo, start_lbn)) == NULL ||
            (tbp->b_vflags & BV_BKGRDINPROG)) {
            BO_UNLOCK(bo);
            ++start_lbn;
@@ -985,7 +985,7 @@
                }
            }
        }
-       vm_object_pip_add(tbp->b_bufobj->bo_object,
+       vm_object_pip_add(tbp->b_vp->v_object,
            tbp->b_npages);
        for (j = 0; j < tbp->b_npages; j += 1) {
            m = tbp->b_pages[j];
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -715,11 +715,18 @@
        int *a_runb;
    } */ *ap;
 {
+   struct vnode *vp;
 
-   if (ap->a_bop != NULL)
-       *ap->a_bop = &ap->a_vp->v_bufobj;
+   vp = ap->a_vp;
+   if (ap->a_bop != NULL) {
+       if ((vp->v_irflag & VIRF_BUFOBJ) != 0)
+           *ap->a_bop = vp2bo(vp);
+       else
+           *ap->a_bop = NULL;
+   }
    if (ap->a_bnp != NULL)
-       *ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
+       *ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->
+           mnt_stat.f_iosize);
    if (ap->a_runp != NULL)
        *ap->a_runp = 0;
    if (ap->a_runb != NULL)
@@ -1096,6 +1103,8 @@
            break;
        }
 
+       bo = (vp->v_irflag & VIRF_BUFOBJ) != 0 ? vp2bo(vp) : NULL;
+
        /*
        * Round to block boundaries (and later possibly further to
        * page boundaries).  Applications cannot reasonably be aware
@@ -1105,7 +1114,10 @@
        * discarding of buffers but is efficient enough since the
        * pages usually remain in VMIO for some time.
        */
-       bsize = vp->v_bufobj.bo_bsize;
+       if (bo != NULL)
+           bsize = bo->bo_bsize;
+       else
+           bsize = vp->v_mount->mnt_stat.f_bsize;
        bstart = rounddown(ap->a_start, bsize);
        bend = roundup(ap->a_end, bsize);
 
@@ -1124,14 +1136,16 @@
            VM_OBJECT_RUNLOCK(vp->v_object);
        }
 
-       bo = &vp->v_bufobj;
-       BO_RLOCK(bo);
-       startn = bstart / bsize;
-       endn = bend / bsize;
-       error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
-       if (error == 0)
-           error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
-       BO_RUNLOCK(bo);
+       if (bo != NULL) {
+           BO_RLOCK(bo);
+           startn = bstart / bsize;
+           endn = bend / bsize;
+           error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
+           if (error == 0)
+               error = bnoreuselist(&bo->bo_dirty, bo, startn,
+                   endn);
+           BO_RUNLOCK(bo);
+       }
        VOP_UNLOCK(vp);
        break;
    default:
@@ -1350,7 +1364,8 @@
    */
loop:
    MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
-       if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
+       if ((vp->v_irflag & VIRF_BUFOBJ) != 0 &&
+           vp2bo(vp)->bo_dirty.bv_cnt == 0) {
            VI_UNLOCK(vp);
            continue;
        }
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -971,7 +971,7 @@
    error = VOP_GETATTR(vp, &va, td->td_ucred);
    if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
        error = priv_check_cred(td->td_ucred, PRIV_VFS_ADMIN);
-   if (error == 0)
+   if (error == 0 && (vp->v_irflag & VIRF_BUFOBJ) != 0)
        error = vinvalbuf(vp, V_SAVE, 0, 0);
    if (error == 0 && vp->v_type != VDIR)
        error = ENOTDIR;
diff --git a/sys/kern/vfs_mountroot.c b/sys/kern/vfs_mountroot.c
--- a/sys/kern/vfs_mountroot.c
+++ b/sys/kern/vfs_mountroot.c
@@ -363,7 +363,7 @@
    if (!error) {
        vp = nd.ni_vp;
        error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
-       if (!error)
+       if (!error && (vp->v_irflag & VIRF_BUFOBJ) != 0)
            error = vinvalbuf(vp, V_SAVE, 0, 0);
        if (!error) {
            cache_purge(vp);
@@ -388,7 +388,7 @@
    if (!error) {
        vp = nd.ni_vp;
        error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
-       if (!error)
+       if (!error && (vp->v_irflag & VIRF_BUFOBJ) != 0)
            error = vinvalbuf(vp, V_SAVE, 0, 0);
        if (!error) {
            vpdevfs = mpdevfs->mnt_vnodecovered;
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -120,6 +120,8 @@
            daddr_t startlbn, daddr_t endlbn);
 static void vnlru_recalc(void);
 
+static struct vop_vector sync_vnodeops;
+
 /*
  * These fences are intended for cases where some synchronization is
  * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
@@ -229,6 +231,7 @@
 
 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
 static uma_zone_t vnode_zone;
+static uma_zone_t vnode_bo_zone;
 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");
 
 __read_frequently smr_t vfs_smr;
@@ -547,10 +550,6 @@
    */
    lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
        LK_NOSHARE | LK_IS_VNODE);
-   /*
-    * Initialize bufobj.
-    */
-   bufobj_init(&vp->v_bufobj, vp);
    /*
    * Initialize namecache.
    */
@@ -573,6 +572,26 @@
    return (0);
 }
 
+static int
+vnode_init_bo(void *mem, int size, int flags)
+{
+   struct vnode_bo *vpo;
+
+   /*
+    * The size includes the bufobj part; it is zeroed for us.
+    */
+   vnode_init(mem, size, flags);
+
+   vpo = mem;
+   vpo->vb_v.v_irflag |= VIRF_BUFOBJ;
+
+   /*
+    * Initialize bufobj.
+    */
+   bufobj_init(&vpo->vb_bo, &vpo->vb_v);
+   return (0);
+}
+
 /*
  * Free a vnode when it is cleared from the zone.
  */
@@ -580,7 +599,6 @@
 vnode_fini(void *mem, int size)
 {
    struct vnode *vp;
-   struct bufobj *bo;
 
    vp = mem;
    vdbatch_dequeue(vp);
@@ -590,8 +608,18 @@
    rangelock_destroy(&vp->v_rl);
    lockdestroy(vp->v_vnlock);
    mtx_destroy(&vp->v_interlock);
-   bo = &vp->v_bufobj;
-   rw_destroy(BO_LOCKPTR(bo));
+}
+
+static void
+vnode_fini_bo(void *mem, int size)
+{
+   struct vnode_bo *vpo;
+
+   vnode_fini(mem, size);
+
+   vpo = mem;
+   MPASS((vpo->vb_v.v_irflag & VIRF_BUFOBJ) != 0);
+   rw_destroy(BO_LOCKPTR(&vpo->vb_bo));
 }
 
 /*
@@ -658,9 +686,12 @@
    TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
    vnode_list_reclaim_marker = vn_alloc_marker(NULL);
    TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);
    vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), NULL, NULL,
        vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
+   vnode_bo_zone = uma_zcreate("VNODEBO", sizeof(struct vnode_bo), NULL,
+       NULL, vnode_init_bo, vnode_fini_bo, UMA_ALIGN_PTR, 0);
    uma_zone_set_smr(vnode_zone, vfs_smr);
+   uma_zone_set_smr(vnode_bo_zone, vfs_smr);
    /*
    * Preallocate enough nodes to support one-per buf so that
    * we can not fail an insert.  reassignbuf() callers can not
@@ -1648,7 +1679,7 @@
 static u_long vn_alloc_cyclecount;
 
 static struct vnode * __noinline
-vn_alloc_hard(struct mount *mp)
+vn_alloc_hard(uma_zone_t zone, struct mount *mp)
 {
    u_long rnumvnodes, rfreevnodes;
 
@@ -1690,23 +1721,26 @@
    if (vnlru_under(rnumvnodes, vlowat))
        vnlru_kick();
    mtx_unlock(&vnode_list_mtx);
-   return (uma_zalloc_smr(vnode_zone, M_WAITOK));
+   return (uma_zalloc_smr(zone, M_WAITOK));
 }
 
 static struct vnode *
-vn_alloc(struct mount *mp)
+vn_alloc(struct mount *mp, bool need_bufobj)
 {
+   uma_zone_t zone;
    u_long rnumvnodes;
 
+   zone = need_bufobj ? vnode_bo_zone : vnode_zone;
+
    if (__predict_false(vn_alloc_cyclecount != 0))
-       return (vn_alloc_hard(mp));
+       return (vn_alloc_hard(zone, mp));
    rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
    if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
        atomic_subtract_long(&numvnodes, 1);
-       return (vn_alloc_hard(mp));
+       return (vn_alloc_hard(zone, mp));
    }
 
-   return (uma_zalloc_smr(vnode_zone, M_WAITOK));
+   return (uma_zalloc_smr(zone, M_WAITOK));
 }
 
 static void
@@ -1714,7 +1748,8 @@
 {
 
    atomic_subtract_long(&numvnodes, 1);
-   uma_zfree_smr(vnode_zone, vp);
+   uma_zfree_smr((vp->v_irflag & VIRF_BUFOBJ) != 0 ? vnode_bo_zone :
+       vnode_zone, vp);
 }
 
 /*
@@ -1727,18 +1762,25 @@
    struct vnode *vp;
    struct thread *td;
    struct lock_object *lo;
+   bool need_bufobj;
 
    CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
 
    KASSERT(vops->registered,
        ("%s: not registered vector op %p\n", __func__, vops));
 
+   need_bufobj = (mp != NULL &&
+       (mp->mnt_kern_flag & MNTK_USES_BCACHE) != 0) ||
+       vops == &sync_vnodeops;
+
    td = curthread;
    if (td->td_vp_reserved != NULL) {
        vp = td->td_vp_reserved;
        td->td_vp_reserved = NULL;
+       VNASSERT(need_bufobj == ((vp->v_irflag & VIRF_BUFOBJ) != 0),
+           vp, ("need_bufobj"));
    } else {
-       vp = vn_alloc(mp);
+       vp = vn_alloc(mp, need_bufobj);
    }
    counter_u64_add(vnodes_created, 1);
    /*
@@ -1777,10 +1819,11 @@
    KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
    vp->v_type = VNON;
    vp->v_op = vops;
-   vp->v_irflag = 0;
+   vp->v_irflag &= VIRF_BUFOBJ;
    v_init_counters(vp);
    vn_seqc_init(vp);
-   vp->v_bufobj.bo_ops = &buf_ops_bio;
+   if (need_bufobj)
+       vp2bo(vp)->bo_ops = &buf_ops_bio;
 #ifdef DIAGNOSTIC
    if (mp == NULL && vops != &dead_vnodeops)
        printf("NULL mp in getnewvnode(9), tag %s\n", tag);
@@ -1791,7 +1834,8 @@
    mac_vnode_associate_singlelabel(mp, vp);
 #endif
    if (mp != NULL) {
-       vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
+       if (need_bufobj)
+           vp2bo(vp)->bo_bsize = mp->mnt_stat.f_iosize;
        if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
            vp->v_vflag |= VV_NOKNOTE;
    }
@@ -1809,13 +1853,14 @@
 }
 
 void
-getnewvnode_reserve(void)
+getnewvnode_reserve(struct mount *mp)
 {
    struct thread *td;
 
    td = curthread;
    MPASS(td->td_vp_reserved == NULL);
-   td->td_vp_reserved = vn_alloc(NULL);
+   td->td_vp_reserved = vn_alloc(NULL, mp != NULL &&
+       (mp->mnt_kern_flag & MNTK_USES_BCACHE) != 0);
 }
 
 void
@@ -1850,17 +1895,20 @@
    */
    vn_seqc_write_end_free(vp);
 
-   bo = &vp->v_bufobj;
+   bo = (vp->v_irflag & VIRF_BUFOBJ) != 0 ? vp2bo(vp) : NULL;
    VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
    VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
    VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
    VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
-   VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
-   VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
-   VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
+   VNASSERT(bo == NULL || bo->bo_numoutput == 0, vp,
+       ("Clean vnode has pending I/O's"));
+   VNASSERT(bo == NULL || bo->bo_clean.bv_cnt == 0, vp,
+       ("cleanbufcnt not 0"));
+   VNASSERT(bo == NULL || pctrie_is_empty(&bo->bo_clean.bv_root), vp,
        ("clean blk trie not empty"));
-   VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
-   VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
+   VNASSERT(bo == NULL || bo->bo_dirty.bv_cnt == 0, vp,
+       ("dirtybufcnt not 0"));
+   VNASSERT(bo == NULL || pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
        ("dirty blk trie not empty"));
    VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
    VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
@@ -1883,7 +1931,8 @@
    vp->v_fifoinfo = NULL;
    vp->v_iflag = 0;
    vp->v_vflag = 0;
-   bo->bo_flag = 0;
+   if (bo != NULL)
+       bo->bo_flag = 0;
    vn_free(vp);
 }
 
@@ -1979,7 +2028,8 @@
  * Called with the underlying object locked.
  */
 int
-bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
+bufobj_invalbuf(vm_object_t obj, struct bufobj *bo, int flags, int slpflag,
+    int slptimeo)
 {
    int error;
 
@@ -2030,9 +2080,9 @@
    */
    do {
        bufobj_wwait(bo, 0, 0);
-       if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) {
+       if ((flags & V_VMIO) == 0 && obj != NULL) {
            BO_UNLOCK(bo);
-           vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx");
+           vm_object_pip_wait_unlocked(obj, "bovlbx");
            BO_LOCK(bo);
        }
    } while (bo->bo_numoutput > 0);
@@ -2041,12 +2091,12 @@
    /*
    * Destroy the copy in the VM cache, too.
    */
-   if (bo->bo_object != NULL &&
+   if (obj != NULL &&
        (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
-       VM_OBJECT_WLOCK(bo->bo_object);
-       vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
+       VM_OBJECT_WLOCK(obj);
+       vm_object_page_remove(obj, 0, 0, (flags & V_SAVE) ?
            OBJPR_CLEANONLY : 0);
-       VM_OBJECT_WUNLOCK(bo->bo_object);
+       VM_OBJECT_WUNLOCK(obj);
    }
 
 #ifdef INVARIANTS
@@ -2070,12 +2120,15 @@
 int
 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
 {
+   vm_object_t obj;
 
    CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
    ASSERT_VOP_LOCKED(vp, "vinvalbuf");
-   if (vp->v_object != NULL && vp->v_object->handle != vp)
+
+   obj = vp->v_object;
+   if (obj != NULL && obj->handle != vp)
        return (0);
-   return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
+   return (bufobj_invalbuf(obj, vp2bo(vp), flags, slpflag, slptimeo));
 }
 
 /*
@@ -2217,7 +2270,7 @@
 
    ASSERT_VOP_LOCKED(vp, "vtruncbuf");
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
restart_unlocked:
    BO_LOCK(bo);
 
@@ -2271,7 +2324,7 @@
    start = blksize * startlbn;
    end = blksize * endlbn;
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
    MPASS(blksize == bo->bo_bsize);
 
@@ -2452,7 +2505,7 @@
 {
    struct bufobj *bo;
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    ASSERT_BO_WLOCKED(bo);
    VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
 
@@ -2485,7 +2538,7 @@
    * Delete from old vnode list, if on one.
    */
    vp = bp->b_vp;  /* XXX */
-   bo = bp->b_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
    buf_vlist_remove(bp);
    if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
@@ -3929,6 +3982,7 @@
    struct thread *td;
    struct mount *mp;
    vm_object_t object;
+   struct bufobj *bo;
    bool active, doinginact, oweinact;
 
    ASSERT_VOP_ELOCKED(vp, "vgonel");
@@ -4000,29 +4054,33 @@
    * If the flush fails, just toss the buffers.
    */
    mp = NULL;
-   if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
-       (void) vn_start_secondary_write(vp, &mp, V_WAIT);
-   if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
-       while (vinvalbuf(vp, 0, 0, 0) != 0)
-           ;
-   }
-   BO_LOCK(&vp->v_bufobj);
-   KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
-       vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
-       TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
-       vp->v_bufobj.bo_clean.bv_cnt == 0,
-       ("vp %p bufobj not invalidated", vp));
+   bo = (vp->v_irflag & VIRF_BUFOBJ) != 0 ? vp2bo(vp) : NULL;
+   object = vp->v_object;
+   if (bo != NULL) {
+       if (!TAILQ_EMPTY(&bo->bo_dirty.bv_hd))
+           (void) vn_start_secondary_write(vp, &mp, V_WAIT);
+       if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
+           while (vinvalbuf(vp, 0, 0, 0) != 0)
+               ;
+       }
 
-   /*
-    * For VMIO bufobj, BO_DEAD is set later, or in
-    * vm_object_terminate() after the object's page queue is
-    * flushed.
-    */
-   object = vp->v_bufobj.bo_object;
-   if (object == NULL)
-       vp->v_bufobj.bo_flag |= BO_DEAD;
-   BO_UNLOCK(&vp->v_bufobj);
+       BO_LOCK(bo);
+       KASSERT(TAILQ_EMPTY(&bo->bo_dirty.bv_hd) &&
+           bo->bo_dirty.bv_cnt == 0 &&
+           TAILQ_EMPTY(&bo->bo_clean.bv_hd) &&
+           bo->bo_clean.bv_cnt == 0,
+           ("vp %p bufobj not invalidated", vp));
+
+       /*
+        * For VMIO bufobj, BO_DEAD is set later, or in
+        * vm_object_terminate() after the object's page queue is
+        * flushed.
+        */
+       if (object == NULL)
+           bo->bo_flag |= BO_DEAD;
+       BO_UNLOCK(bo);
+   }
 
    /*
    * Handle the VM part.  Tmpfs handles v_object on its own (the
@@ -4075,6 +4133,7 @@
 void
 vn_printf(struct vnode *vp, const char *fmt, ...)
 {
+   struct bufobj *bo;
    va_list ap;
    char buf[256], buf2[16];
    u_long flags;
@@ -4122,7 +4181,10 @@
        strlcat(buf, "|VIRF_PGREAD", sizeof(buf));
    if (irflag & VIRF_MOUNTPOINT)
        strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf));
+   if (irflag & VIRF_BUFOBJ)
+       strlcat(buf, "|VIRF_BUFOBJ", sizeof(buf));
-   flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT);
+   flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT |
+       VIRF_BUFOBJ);
    if (flags != 0) {
        snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
        strlcat(buf, buf2, sizeof(buf));
@@ -4192,14 +4254,17 @@
    if (mtx_owned(VI_MTX(vp)))
        printf(" VI_LOCKed");
    printf("\n");
-   if (vp->v_object != NULL)
-       printf("    v_object %p ref %d pages %d "
-           "cleanbuf %d dirtybuf %d\n",
+   if (vp->v_object != NULL) {
+       printf("    v_object %p ref %d pages %d",
            vp->v_object, vp->v_object->ref_count,
-           vp->v_object->resident_page_count,
-           vp->v_bufobj.bo_clean.bv_cnt,
-           vp->v_bufobj.bo_dirty.bv_cnt);
-   printf("    ");
+           vp->v_object->resident_page_count);
+   }
+   bo = (vp->v_irflag & VIRF_BUFOBJ) != 0 ? vp2bo(vp) : NULL;
+   if (bo != NULL) {
+       printf(" cleanbuf %d dirtybuf %d",
+           bo->bo_clean.bv_cnt, bo->bo_dirty.bv_cnt);
+   }
+   printf("\n    ");
    lockmgr_printinfo(vp->v_vnlock);
    if (vp->v_data != NULL)
        VOP_PRINT(vp);
@@ -5002,7 +5067,7 @@
        }
        next = start;
    }
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
    vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
    /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
@@ -5055,7 +5120,7 @@
    /*
    * Move ourselves to the back of the sync list.
    */
-   bo = &syncvp->v_bufobj;
+   bo = vp2bo(syncvp);
    BO_LOCK(bo);
    vn_syncer_add_to_worklist(bo, syncdelay);
    BO_UNLOCK(bo);
@@ -5105,7 +5170,7 @@
    struct vnode *vp = ap->a_vp;
    struct bufobj *bo;
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
    mtx_lock(&sync_mtx);
    if (vp->v_mount->mnt_syncer == vp)
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -2737,7 +2737,7 @@
        mp = vp->v_rdev->si_mountpt;
        VI_UNLOCK(vp);
    }
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
loop1:
    /*
diff --git a/sys/sys/bufobj.h b/sys/sys/bufobj.h
--- a/sys/sys/bufobj.h
+++ b/sys/sys/bufobj.h
@@ -60,6 +60,7 @@
 struct bufobj;
 struct buf_ops;
+struct vm_object;
 
 extern struct buf_ops buf_ops_bio;
 
@@ -99,7 +100,6 @@
 struct bufobj {
    struct rwlock   bo_lock;    /* Lock which protects "i" things */
    struct buf_ops  *bo_ops;    /* - Buffer operations */
-   struct vm_object *bo_object;    /* v Place to store VM object */
    LIST_ENTRY(bufobj) bo_synclist; /* S dirty vnode list */
    void        *bo_private;    /* private pointer */
    struct bufv bo_clean;   /* i Clean buffers */
@@ -132,7 +132,8 @@
 void   bufobj_wdrop(struct bufobj *bo);
 void   bufobj_wref(struct bufobj *bo);
 void   bufobj_wrefl(struct bufobj *bo);
-int    bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo);
+int    bufobj_invalbuf(struct vm_object *obj, struct bufobj *bo, int flags,
+       int slpflag, int slptimeo);
 int    bufobj_wwait(struct bufobj *bo, int slpflag, int timeo);
 int    bufsync(struct bufobj *bo, int waitfor);
 void   bufbdflush(struct bufobj *bo, struct buf *bp);
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -154,7 +154,7 @@
    */
    TAILQ_ENTRY(vnode) v_vnodelist; /* l vnode lists */
    TAILQ_ENTRY(vnode) v_lazylist;  /* l vnode lazy list */
-   struct bufobj   v_bufobj;   /* * Buffer cache object */
+   struct vm_object *v_object; /* v page cache container */
 
    /*
    * Hooks for various subsystems and features.
@@ -175,24 +175,13 @@
    int v_seqc_users;   /* i modifications pending */
 };
 
-#ifndef DEBUG_LOCKS
-#ifdef _LP64
-/*
- * Not crossing 448 bytes fits 9 vnodes per page.  If you have to add fields
- * to the structure and there is nothing which can be done to prevent growth
- * then so be it.  But don't grow it without a good reason.
- */
-_Static_assert(sizeof(struct vnode) <= 448, "vnode size crosses 448 bytes");
-#endif
-#endif
+struct vnode_bo {
+   struct vnode    vb_v;
+   struct bufobj   vb_bo;
+};
 
 #endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
 
-#define bo2vnode(bo)    __containerof((bo), struct vnode, v_bufobj)
-
-/* XXX: These are temporary to avoid a source sweep at this time */
-#define v_object   v_bufobj.bo_object
-
 /*
  * Userland version of struct vnode, for sysctl.
  */
@@ -251,6 +240,7 @@
 #define    VIRF_PGREAD 0x0002  /* Direct reads from the page cache are
                   permitted, never cleared once set */
 #define    VIRF_MOUNTPOINT 0x0004  /* This vnode is mounted on */
+#define    VIRF_BUFOBJ 0x0008  /* Uses buffers, bufobj is valid */
 
 #define    VI_TEXT_REF 0x0001  /* Text ref grabbed use ref */
 #define    VI_MOUNT    0x0002  /* Mount in progress */
@@ -525,6 +515,20 @@
 };
 
 #ifdef _KERNEL
+
+#define    bo2vnode(bo)    (&(__containerof((bo), struct vnode_bo, vb_bo)->vb_v))
+
+static inline struct bufobj *
+vp2bo(struct vnode *vp)
+{
+   struct vnode_bo *vpo;
+
+   KASSERT((vp->v_irflag & VIRF_BUFOBJ) != 0,
+       ("vp2bo vp %p does not have bufobj", vp));
+   vpo = __containerof(vp, struct vnode_bo, vb_v);
+   return (&vpo->vb_bo);
+}
+
 /*
  * A list of all the operation descs.
  */
@@ -676,7 +680,7 @@
 int    freebsd11_cvtstat(struct stat *st, struct freebsd11_stat *ost);
 int    getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
        struct vnode **vpp);
-void   getnewvnode_reserve(void);
+void   getnewvnode_reserve(struct mount *mp);
 void   getnewvnode_drop_reserve(void);
 int    insmntque1(struct vnode *vp, struct mount *mp,
        void (*dtr)(struct vnode *, void *), void *dtr_arg);
diff --git a/sys/ufs/ffs/ffs_inode.c b/sys/ufs/ffs/ffs_inode.c
--- a/sys/ufs/ffs/ffs_inode.c
+++ b/sys/ufs/ffs/ffs_inode.c
@@ -249,7 +249,7 @@
    ip = VTOI(vp);
    ump = VFSTOUFS(vp->v_mount);
    fs = ump->um_fs;
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
 
    ASSERT_VOP_LOCKED(vp, "ffs_truncate");
 
diff --git a/sys/ufs/ffs/ffs_rawread.c b/sys/ufs/ffs/ffs_rawread.c
--- a/sys/ufs/ffs/ffs_rawread.c
+++ b/sys/ufs/ffs/ffs_rawread.c
@@ -103,7 +103,7 @@
    vm_object_t obj;
 
    /* Check for dirty mmap, pending writes and dirty buffers */
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
    VI_LOCK(vp);
    if (bo->bo_numoutput > 0 ||
@@ -150,7 +150,7 @@
 
        /* Wait for pending writes to complete */
        BO_LOCK(bo);
-       error = bufobj_wwait(&vp->v_bufobj, 0, 0);
+       error = bufobj_wwait(bo, 0, 0);
        if (error != 0) {
            /* XXX: can't happen with a zero timeout ??? */
            BO_UNLOCK(bo);
@@ -254,7 +254,7 @@
    if (vmapbuf(bp, udata, bp->b_bcount, 1) < 0)
        return EFAULT;
 
-   BO_STRATEGY(&dp->v_bufobj, bp);
+   BO_STRATEGY(vp2bo(dp), bp);
    return 0;
 }
 
@@ -428,7 +428,7 @@
 
    /* Only handle sector aligned reads */
    ip = VTOI(vp);
-   secsize = ITODEVVP(ip)->v_bufobj.bo_bsize;
+   secsize = vp2bo(ITODEVVP(ip))->bo_bsize;
    if ((uio->uio_offset & (secsize - 1)) == 0 &&
        (uio->uio_resid & (secsize - 1)) == 0) {
 
diff --git a/sys/ufs/ffs/ffs_snapshot.c b/sys/ufs/ffs/ffs_snapshot.c
--- a/sys/ufs/ffs/ffs_snapshot.c
+++ b/sys/ufs/ffs/ffs_snapshot.c
@@ -2567,7 +2567,7 @@
    bip->bio_length = bp->b_bcount;
    bip->bio_done = NULL;
 
-   g_io_request(bip, ITODEVVP(ip)->v_bufobj.bo_private);
+   g_io_request(bip, vp2bo(ITODEVVP(ip))->bo_private);
    bp->b_error = biowait(bip, "snaprdb");
    g_destroy_bio(bip);
    return (bp->b_error);
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -3467,7 +3467,7 @@
    rec->jsr_seq = jseg->js_seq;
    rec->jsr_oldest = jseg->js_oldseq;
    rec->jsr_cnt = jseg->js_cnt;
-   rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
+   rec->jsr_blocks = jseg->js_size / vp2bo(ump->um_devvp)->bo_bsize;
    rec->jsr_crc = 0;
    rec->jsr_time = ump->um_fs->fs_mtime;
 }
@@ -3734,7 +3734,7 @@
    LOCK_OWNED(ump);
    fs = ump->um_fs;
    jblocks = ump->softdep_jblocks;
-   devbsize = ump->um_devvp->v_bufobj.bo_bsize;
+   devbsize = vp2bo(ump->um_devvp)->bo_bsize;
    /*
    * We write anywhere between a disk block and fs block.  The upper
    * bound is picked to prevent buffer cache fragmentation and limit
@@ -7411,9 +7411,9 @@
    vn_pages_remove(vp, extend, 0);
    if ((flags & IO_NORMAL) == 0)
        return;
-   BO_LOCK(&vp->v_bufobj);
+   BO_LOCK(vp2bo(vp));
    drain_output(vp);
-   BO_UNLOCK(&vp->v_bufobj);
+   BO_UNLOCK(vp2bo(vp));
    /*
    * The vnode pager eliminates file pages we eliminate indirects
    * below.
@@ -7503,7 +7503,7 @@
    * any dependencies.
    */
    vp = ITOV(ip);
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
    drain_output(vp);
    TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
@@ -8515,7 +8515,7 @@
        if (bp == NULL || bp->b_blkno != dbn)
            panic("indir_trunc: Bad saved buf %p blkno %jd",
                bp, (intmax_t)dbn);
-   } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
+   } else if ((bp = incore(vp2bo(freeblks->fb_devvp), dbn)) != NULL) {
        /*
        * The lock prevents the buf dep list from changing and
        * indirects on devvp should only ever have one dependency.
@@ -13016,7 +13016,7 @@
    if (!vn_isdisk(vp))
        panic("softdep_fsync_mountdev: vnode not a disk");
 
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
restart:
    BO_LOCK(bo);
    TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
@@ -13417,7 +13417,7 @@
    int error;
 
    error = 0;
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    ip = VTOI(vp);
    blkno = DIP(ip, i_db[lbn]);
    if (blkno == 0)
@@ -13977,7 +13977,7 @@
    failed_vnode = 0;
    td = curthread;
    MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
-       if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
+       if (TAILQ_FIRST(&vp2bo(lvp)->bo_dirty.bv_hd) == NULL) {
            VI_UNLOCK(lvp);
            continue;
        }
@@ -14303,7 +14303,7 @@
    MPASS(VTOI(vp)->i_mode != 0);
    if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
        softdep_error("clear_remove: fsync", error);
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    BO_LOCK(bo);
    drain_output(vp);
    BO_UNLOCK(bo);
@@ -14393,9 +14393,9 @@
    } else {
        if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
            softdep_error("clear_inodedeps: fsync2", error);
-       BO_LOCK(&vp->v_bufobj);
+       BO_LOCK(vp2bo(vp));
        drain_output(vp);
-       BO_UNLOCK(&vp->v_bufobj);
+       BO_UNLOCK(vp2bo(vp));
    }
    vput(vp);
    vn_finished_write(mp);
@@ -14765,7 +14765,7 @@
    struct inodedep *inodedep;
    int error, unlinked;
 
-   bo = &devvp->v_bufobj;
+   bo = vp2bo(devvp);
    ASSERT_BO_WLOCKED(bo);
 
    /*
@@ -14892,7 +14892,7 @@
 {
 
    ASSERT_VOP_LOCKED(vp, "drain_output");
-   (void)bufobj_wwait(&vp->v_bufobj, 0, 0);
+   (void)bufobj_wwait(vp2bo(vp), 0, 0);
 }
 
 /*
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1054,6 +1054,9 @@
    ump = NULL;
    cred = td ? td->td_ucred : NOCRED;
    ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
+   MNT_ILOCK(mp);
+   mp->mnt_kern_flag |= MNTK_USES_BCACHE;
+   MNT_IUNLOCK(mp);
 
    devvp = mntfs_allocvp(mp, odevvp);
    VOP_UNLOCK(odevvp);
@@ -1074,10 +1077,10 @@
        return (error);
    }
    dev_ref(dev);
-   devvp->v_bufobj.bo_ops = &ffs_ops;
-   BO_LOCK(&odevvp->v_bufobj);
-   odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
-   BO_UNLOCK(&odevvp->v_bufobj);
+   vp2bo(devvp)->bo_ops = &ffs_ops;
+   BO_LOCK(vp2bo(odevvp));
+   vp2bo(odevvp)->bo_flag |= BO_NOBUFS;
+   BO_UNLOCK(vp2bo(odevvp));
    if (dev->si_iosize_max != 0)
        mp->mnt_iosize_max = dev->si_iosize_max;
    if (mp->mnt_iosize_max > maxphys)
@@ -1157,7 +1160,7 @@
    }
    ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
    ump->um_cp = cp;
-   ump->um_bo = &devvp->v_bufobj;
+   ump->um_bo = vp2bo(devvp);
    ump->um_fs = fs;
    if (fs->fs_magic == FS_UFS1_MAGIC) {
        ump->um_fstype = UFS1;
@@ -1359,9 +1362,9 @@
        free(ump, M_UFSMNT);
        mp->mnt_data = NULL;
    }
-   BO_LOCK(&odevvp->v_bufobj);
-   odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
-   BO_UNLOCK(&odevvp->v_bufobj);
+   BO_LOCK(vp2bo(odevvp));
+   vp2bo(odevvp)->bo_flag &= ~BO_NOBUFS;
+   BO_UNLOCK(vp2bo(odevvp));
    atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
    mntfs_freevp(devvp);
    dev_rel(dev);
@@ -1568,9 +1571,9 @@
    }
    g_vfs_close(ump->um_cp);
    g_topology_unlock();
-   BO_LOCK(&ump->um_odevvp->v_bufobj);
-   ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
-   BO_UNLOCK(&ump->um_odevvp->v_bufobj);
+   BO_LOCK(vp2bo(ump->um_odevvp));
+   vp2bo(ump->um_odevvp)->bo_flag &= ~BO_NOBUFS;
+   BO_UNLOCK(vp2bo(ump->um_odevvp));
    atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
    mntfs_freevp(ump->um_devvp);
    vrele(ump->um_odevvp);
@@ -1874,7 +1877,7 @@
        ip = VTOI(vp);
        if ((ip->i_flag &
            (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
-           vp->v_bufobj.bo_dirty.bv_cnt == 0) {
+           vp2bo(vp)->bo_dirty.bv_cnt == 0) {
            VI_UNLOCK(vp);
            continue;
        }
@@ -1912,7 +1915,7 @@
    }
 
    devvp = ump->um_devvp;
-   bo = &devvp->v_bufobj;
+   bo = vp2bo(devvp);
    BO_LOCK(bo);
    if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
        BO_UNLOCK(bo);
@@ -2029,7 +2032,7 @@
    lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
    VN_LOCK_AREC(vp);
    vp->v_data = ip;
-   vp->v_bufobj.bo_bsize = fs->fs_bsize;
+   vp2bo(vp)->bo_bsize = fs->fs_bsize;
    ip->i_vnode = vp;
    ip->i_ump = ump;
    ip->i_number = ino;
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -225,7 +225,7 @@
    int error;
 
    vp = ap->a_vp;
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
retry:
    error = ffs_syncvnode(vp, ap->a_waitfor, 0);
    if (error)
@@ -265,7 +265,7 @@
    bool still_dirty, unlocked, wait;
 
    ip = VTOI(vp);
-   bo = &vp->v_bufobj;
+   bo = vp2bo(vp);
    ump = VFSTOUFS(vp->v_mount);
 
    /*
@@ -1948,7 +1948,7 @@
    vp = ap->a_vp;
    um = VFSTOUFS(vp->v_mount);
 
-   if (!use_buf_pager && um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE)
+   if (!use_buf_pager && vp2bo(um->um_devvp)->bo_bsize <= PAGE_SIZE)
        return (vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
            ap->a_rbehind, ap->a_rahead, NULL, NULL));
    return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind,
@@ -1967,7 +1967,7 @@
    um = VFSTOUFS(vp->v_mount);
    do_iodone = true;
 
-   if (um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE) {
+   if (vp2bo(um->um_devvp)->bo_bsize <= PAGE_SIZE) {
        error = vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
            ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
        if (error == 0)
diff --git a/sys/ufs/ufs/ufs_bmap.c b/sys/ufs/ufs/ufs_bmap.c
--- a/sys/ufs/ufs/ufs_bmap.c
+++ b/sys/ufs/ufs/ufs_bmap.c
@@ -83,7 +83,7 @@
    * to physical mapping is requested.
    */
    if (ap->a_bop != NULL)
-       *ap->a_bop = &VFSTOUFS(ap->a_vp->v_mount)->um_devvp->v_bufobj;
+       *ap->a_bop = vp2bo(VFSTOUFS(ap->a_vp->v_mount)->um_devvp);
    if (ap->a_bnp == NULL)
        return (0);
 
@@ -253,7 +253,8 @@
        */
        metalbn = ap->in_lbn;
 
-       if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn)
+       if ((daddr == 0 && !incore(vp2bo(vp), metalbn)) ||
+           metalbn == bn)
            break;
        /*
        * If we get here, we've either got the block in the cache
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -2990,10 +2990,10 @@
    if (bp->b_iocmd == BIO_WRITE) {
        if (bp->b_bufobj)
            bufobj_wdrop(bp->b_bufobj);
-       bufobj_wref(&vp2->v_bufobj);
+       bufobj_wref(vp2bo(vp2));
    }
-   if (bp->b_bufobj != &vp2->v_bufobj)
-       bp->b_bufobj = &vp2->v_bufobj;
+   if (bp->b_bufobj != vp2bo(vp2))
+       bp->b_bufobj = vp2bo(vp2);
    bp->b_vp = vp2;
    bp->b_iooffset = dbtob(bp->b_blkno);
    bstrategy(bp);
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -444,7 +444,7 @@
 
    bp->b_vp = vp;
    bp->b_flags |= B_PAGING;
-   bp->b_bufobj = &vp->v_bufobj;
+   bp->b_bufobj = vp2bo(vp);
 }
 
 /*
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -191,6 +191,7 @@
 vnode_destroy_vobject(struct vnode *vp)
 {
    struct vm_object *obj;
+   struct bufobj *bo;
 
    obj = vp->v_object;
    if (obj == NULL || obj->handle != vp)
@@ -210,11 +211,13 @@
        vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
        VM_OBJECT_WUNLOCK(obj);
 
-       vinvalbuf(vp, V_SAVE, 0, 0);
-
-       BO_LOCK(&vp->v_bufobj);
-       vp->v_bufobj.bo_flag |= BO_DEAD;
-       BO_UNLOCK(&vp->v_bufobj);
+       if ((vp->v_irflag & VIRF_BUFOBJ) != 0) {
+           vinvalbuf(vp, V_SAVE, 0, 0);
+           bo = vp2bo(vp);
+           BO_LOCK(bo);
+           bo->bo_flag |= BO_DEAD;
+           BO_UNLOCK(bo);
+       }
 
        VM_OBJECT_WLOCK(obj);
        vm_object_terminate(obj);
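For orientation, the sketch below is not part of the patch: it shows how the reworked interfaces are meant to be used together. The function names example_mount() and example_print_bufs() are hypothetical; MNTK_USES_BCACHE, VIRF_BUFOBJ, struct vnode_bo, vp2bo(), and the new getnewvnode_reserve(struct mount *) signature are taken from the patch itself.

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/systm.h>
#include <sys/vnode.h>

/*
 * Hypothetical mount routine.  Filesystems that keep file data in the
 * buffer cache must now advertise it with MNTK_USES_BCACHE, so that
 * getnewvnode() allocates their vnodes from the VNODEBO zone (a
 * struct vnode_bo, i.e. a vnode with a trailing bufobj) instead of the
 * smaller bufobj-less VNODE zone.
 */
static int
example_mount(struct mount *mp)
{

	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);

	/*
	 * The reservation now names the mount so the reserved vnode
	 * comes from the matching zone; getnewvnode() asserts this.
	 */
	getnewvnode_reserve(mp);
	getnewvnode_drop_reserve();
	return (0);
}

/*
 * Hypothetical helper for generic code, which may see vnodes from
 * either zone.  vp2bo() asserts VIRF_BUFOBJ, so the flag must be
 * checked before converting, as the patched vop_stdbmap(),
 * vop_stdadvise(), and vgonel() do.
 */
static void
example_print_bufs(struct vnode *vp)
{
	struct bufobj *bo;

	if ((vp->v_irflag & VIRF_BUFOBJ) == 0)
		return;
	bo = vp2bo(vp);
	printf("clean %d dirty %d\n",
	    bo->bo_clean.bv_cnt, bo->bo_dirty.bv_cnt);
}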