Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/vfs_subr.c
Show First 20 Lines • Show All 114 Lines • ▼ Show 20 Lines | |||||
static void vfs_knllock(void *arg); | static void vfs_knllock(void *arg); | ||||
static void vfs_knlunlock(void *arg); | static void vfs_knlunlock(void *arg); | ||||
static void vfs_knl_assert_lock(void *arg, int what); | static void vfs_knl_assert_lock(void *arg, int what); | ||||
static void destroy_vpollinfo(struct vpollinfo *vi); | static void destroy_vpollinfo(struct vpollinfo *vi); | ||||
static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, | static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, | ||||
daddr_t startlbn, daddr_t endlbn); | daddr_t startlbn, daddr_t endlbn); | ||||
static void vnlru_recalc(void); | static void vnlru_recalc(void); | ||||
static struct vop_vector sync_vnodeops; | |||||
/* | /* | ||||
* These fences are intended for cases where some synchronization is | * These fences are intended for cases where some synchronization is | ||||
* needed between access of v_iflags and lockless vnode refcount (v_holdcnt | * needed between access of v_iflags and lockless vnode refcount (v_holdcnt | ||||
* and v_usecount) updates. Access to v_iflags is generally synchronized | * and v_usecount) updates. Access to v_iflags is generally synchronized | ||||
* by the interlock, but we have some internal assertions that check vnode | * by the interlock, but we have some internal assertions that check vnode | ||||
* flags without acquiring the lock. Thus, these fences are INVARIANTS-only | * flags without acquiring the lock. Thus, these fences are INVARIANTS-only | ||||
* for now. | * for now. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 93 Lines • ▼ Show 20 Lines | |||||
/* Publicly exported FS */ | /* Publicly exported FS */ | ||||
struct nfs_public nfs_pub; | struct nfs_public nfs_pub; | ||||
static uma_zone_t buf_trie_zone; | static uma_zone_t buf_trie_zone; | ||||
static smr_t buf_trie_smr; | static smr_t buf_trie_smr; | ||||
/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ | /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ | ||||
static uma_zone_t vnode_zone; | static uma_zone_t vnode_zone; | ||||
static uma_zone_t vnode_bo_zone; | |||||
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); | MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); | ||||
__read_frequently smr_t vfs_smr; | __read_frequently smr_t vfs_smr; | ||||
/* | /* | ||||
* The workitem queue. | * The workitem queue. | ||||
* | * | ||||
* It is useful to delay writes of file data and filesystem metadata | * It is useful to delay writes of file data and filesystem metadata | ||||
▲ Show 20 Lines • Show All 303 Lines • ▼ Show 20 Lines | vnode_init(void *mem, int size, int flags) | ||||
vp->v_vnlock = &vp->v_lock; | vp->v_vnlock = &vp->v_lock; | ||||
mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); | mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); | ||||
/* | /* | ||||
* By default, don't allow shared locks unless filesystems opt-in. | * By default, don't allow shared locks unless filesystems opt-in. | ||||
*/ | */ | ||||
lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, | lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, | ||||
LK_NOSHARE | LK_IS_VNODE); | LK_NOSHARE | LK_IS_VNODE); | ||||
/* | /* | ||||
* Initialize bufobj. | |||||
*/ | |||||
bufobj_init(&vp->v_bufobj, vp); | |||||
/* | |||||
* Initialize namecache. | * Initialize namecache. | ||||
*/ | */ | ||||
cache_vnode_init(vp); | cache_vnode_init(vp); | ||||
/* | /* | ||||
* Initialize rangelocks. | * Initialize rangelocks. | ||||
*/ | */ | ||||
rangelock_init(&vp->v_rl); | rangelock_init(&vp->v_rl); | ||||
vp->v_dbatchcpu = NOCPU; | vp->v_dbatchcpu = NOCPU; | ||||
/* | /* | ||||
* Check vhold_recycle_free for an explanation. | * Check vhold_recycle_free for an explanation. | ||||
*/ | */ | ||||
vp->v_holdcnt = VHOLD_NO_SMR; | vp->v_holdcnt = VHOLD_NO_SMR; | ||||
vp->v_type = VNON; | vp->v_type = VNON; | ||||
mtx_lock(&vnode_list_mtx); | mtx_lock(&vnode_list_mtx); | ||||
TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); | TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); | ||||
mtx_unlock(&vnode_list_mtx); | mtx_unlock(&vnode_list_mtx); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | |||||
vnode_init_bo(void *mem, int size, int flags) | |||||
{ | |||||
struct vnode_bo *vpo; | |||||
/* | /* | ||||
* size includes the bufobj part, it is zeroed for us. | |||||
*/ | |||||
vnode_init(mem, size, flags); | |||||
vpo = mem; | |||||
vpo->vb_v.v_irflag |= VIRF_BUFOBJ; | |||||
/* | |||||
* Initialize bufobj. | |||||
*/ | |||||
bufobj_init(&vpo->vb_bo, &vpo->vb_v); | |||||
return (0); | |||||
} | |||||
/* | |||||
* Free a vnode when it is cleared from the zone. | * Free a vnode when it is cleared from the zone. | ||||
*/ | */ | ||||
static void | static void | ||||
vnode_fini(void *mem, int size) | vnode_fini(void *mem, int size) | ||||
{ | { | ||||
struct vnode *vp; | struct vnode *vp; | ||||
struct bufobj *bo; | |||||
vp = mem; | vp = mem; | ||||
vdbatch_dequeue(vp); | vdbatch_dequeue(vp); | ||||
mtx_lock(&vnode_list_mtx); | mtx_lock(&vnode_list_mtx); | ||||
TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); | TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); | ||||
mtx_unlock(&vnode_list_mtx); | mtx_unlock(&vnode_list_mtx); | ||||
rangelock_destroy(&vp->v_rl); | rangelock_destroy(&vp->v_rl); | ||||
lockdestroy(vp->v_vnlock); | lockdestroy(vp->v_vnlock); | ||||
mtx_destroy(&vp->v_interlock); | mtx_destroy(&vp->v_interlock); | ||||
bo = &vp->v_bufobj; | |||||
rw_destroy(BO_LOCKPTR(bo)); | |||||
} | } | ||||
static void | |||||
vnode_fini_bo(void *mem, int size) | |||||
{ | |||||
struct vnode_bo *vpo; | |||||
vnode_fini(mem, size); | |||||
vpo = mem; | |||||
MPASS((vpo->vb_v.v_irflag & VIRF_BUFOBJ) != 0); | |||||
rw_destroy(BO_LOCKPTR(&vpo->vb_bo)); | |||||
} | |||||
/* | /* | ||||
* Provide the size of NFS nclnode and NFS fh for calculation of the | * Provide the size of NFS nclnode and NFS fh for calculation of the | ||||
* vnode memory consumption. The size is specified directly to | * vnode memory consumption. The size is specified directly to | ||||
* eliminate dependency on NFS-private header. | * eliminate dependency on NFS-private header. | ||||
* | * | ||||
* Other filesystems may use bigger or smaller (like UFS and ZFS) | * Other filesystems may use bigger or smaller (like UFS and ZFS) | ||||
* private inode data, but the NFS-based estimation is ample enough. | * private inode data, but the NFS-based estimation is ample enough. | ||||
* Still, we care about differences in the size between 64- and 32-bit | * Still, we care about differences in the size between 64- and 32-bit | ||||
▲ Show 20 Lines • Show All 48 Lines • ▼ Show 20 Lines | vntblinit(void *dummy __unused) | ||||
*/ | */ | ||||
mtx_lock(&vnode_list_mtx); | mtx_lock(&vnode_list_mtx); | ||||
vnlru_recalc(); | vnlru_recalc(); | ||||
mtx_unlock(&vnode_list_mtx); | mtx_unlock(&vnode_list_mtx); | ||||
vnode_list_free_marker = vn_alloc_marker(NULL); | vnode_list_free_marker = vn_alloc_marker(NULL); | ||||
TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); | TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); | ||||
vnode_list_reclaim_marker = vn_alloc_marker(NULL); | vnode_list_reclaim_marker = vn_alloc_marker(NULL); | ||||
TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); | TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); | ||||
vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, | vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), NULL, NULL, | ||||
vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); | vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); | ||||
vnode_bo_zone = uma_zcreate("VNODEBO", sizeof(struct vnode_bo), NULL, | |||||
NULL, vnode_init_bo, vnode_fini_bo, UMA_ALIGN_PTR, 0); | |||||
uma_zone_set_smr(vnode_zone, vfs_smr); | uma_zone_set_smr(vnode_zone, vfs_smr); | ||||
uma_zone_set_smr(vnode_bo_zone, vfs_smr); | |||||
/* | /* | ||||
* Preallocate enough nodes to support one-per buf so that | * Preallocate enough nodes to support one-per buf so that | ||||
* we can not fail an insert. reassignbuf() callers can not | * we can not fail an insert. reassignbuf() callers can not | ||||
* tolerate the insertion failure. | * tolerate the insertion failure. | ||||
*/ | */ | ||||
buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), | buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), | ||||
NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, | NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, | ||||
UMA_ZONE_NOFREE | UMA_ZONE_SMR); | UMA_ZONE_NOFREE | UMA_ZONE_SMR); | ||||
▲ Show 20 Lines • Show All 913 Lines • ▼ Show 20 Lines | |||||
* all codepaths. | * all codepaths. | ||||
* | * | ||||
* The routine can try to free a vnode or stall for up to 1 second waiting for | * The routine can try to free a vnode or stall for up to 1 second waiting for | ||||
* vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. | * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. | ||||
*/ | */ | ||||
static u_long vn_alloc_cyclecount; | static u_long vn_alloc_cyclecount; | ||||
static struct vnode * __noinline | static struct vnode * __noinline | ||||
vn_alloc_hard(struct mount *mp) | vn_alloc_hard(uma_zone_t zone, struct mount *mp) | ||||
{ | { | ||||
u_long rnumvnodes, rfreevnodes; | u_long rnumvnodes, rfreevnodes; | ||||
mtx_lock(&vnode_list_mtx); | mtx_lock(&vnode_list_mtx); | ||||
rnumvnodes = atomic_load_long(&numvnodes); | rnumvnodes = atomic_load_long(&numvnodes); | ||||
if (rnumvnodes + 1 < desiredvnodes) { | if (rnumvnodes + 1 < desiredvnodes) { | ||||
vn_alloc_cyclecount = 0; | vn_alloc_cyclecount = 0; | ||||
goto alloc; | goto alloc; | ||||
Show All 25 Lines | if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && | ||||
vnlru_read_freevnodes() > 1) | vnlru_read_freevnodes() > 1) | ||||
vnlru_free_locked(1, NULL); | vnlru_free_locked(1, NULL); | ||||
} | } | ||||
alloc: | alloc: | ||||
rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; | rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; | ||||
if (vnlru_under(rnumvnodes, vlowat)) | if (vnlru_under(rnumvnodes, vlowat)) | ||||
vnlru_kick(); | vnlru_kick(); | ||||
mtx_unlock(&vnode_list_mtx); | mtx_unlock(&vnode_list_mtx); | ||||
return (uma_zalloc_smr(vnode_zone, M_WAITOK)); | return (uma_zalloc_smr(zone, M_WAITOK)); | ||||
} | } | ||||
static struct vnode * | static struct vnode * | ||||
vn_alloc(struct mount *mp) | vn_alloc(struct mount *mp, bool need_bufobj) | ||||
{ | { | ||||
uma_zone_t zone; | |||||
u_long rnumvnodes; | u_long rnumvnodes; | ||||
zone = need_bufobj ? vnode_bo_zone : vnode_zone; | |||||
if (__predict_false(vn_alloc_cyclecount != 0)) | if (__predict_false(vn_alloc_cyclecount != 0)) | ||||
return (vn_alloc_hard(mp)); | return (vn_alloc_hard(zone, mp)); | ||||
rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; | rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; | ||||
if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { | if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { | ||||
atomic_subtract_long(&numvnodes, 1); | atomic_subtract_long(&numvnodes, 1); | ||||
return (vn_alloc_hard(mp)); | return (vn_alloc_hard(zone, mp)); | ||||
} | } | ||||
return (uma_zalloc_smr(vnode_zone, M_WAITOK)); | return (uma_zalloc_smr(zone, M_WAITOK)); | ||||
} | } | ||||
static void | static void | ||||
vn_free(struct vnode *vp) | vn_free(struct vnode *vp) | ||||
{ | { | ||||
atomic_subtract_long(&numvnodes, 1); | atomic_subtract_long(&numvnodes, 1); | ||||
uma_zfree_smr(vnode_zone, vp); | uma_zfree_smr((vp->v_irflag & VIRF_BUFOBJ) != 0 ? vnode_bo_zone : | ||||
vnode_zone, vp); | |||||
} | } | ||||
/* | /* | ||||
* Return the next vnode from the free list. | * Return the next vnode from the free list. | ||||
*/ | */ | ||||
int | int | ||||
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, | getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, | ||||
struct vnode **vpp) | struct vnode **vpp) | ||||
{ | { | ||||
struct vnode *vp; | struct vnode *vp; | ||||
struct thread *td; | struct thread *td; | ||||
struct lock_object *lo; | struct lock_object *lo; | ||||
bool need_bufobj; | |||||
CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); | CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); | ||||
KASSERT(vops->registered, | KASSERT(vops->registered, | ||||
("%s: not registered vector op %p\n", __func__, vops)); | ("%s: not registered vector op %p\n", __func__, vops)); | ||||
need_bufobj = (mp != NULL && | |||||
(mp->mnt_kern_flag & MNTK_USES_BCACHE) != 0) || | |||||
vops == &sync_vnodeops; | |||||
td = curthread; | td = curthread; | ||||
if (td->td_vp_reserved != NULL) { | if (td->td_vp_reserved != NULL) { | ||||
vp = td->td_vp_reserved; | vp = td->td_vp_reserved; | ||||
td->td_vp_reserved = NULL; | td->td_vp_reserved = NULL; | ||||
VNASSERT(need_bufobj == ((vp->v_irflag & VIRF_BUFOBJ) != 0), | |||||
vp, ("need_bufobj")); | |||||
} else { | } else { | ||||
vp = vn_alloc(mp); | vp = vn_alloc(mp, need_bufobj); | ||||
} | } | ||||
counter_u64_add(vnodes_created, 1); | counter_u64_add(vnodes_created, 1); | ||||
/* | /* | ||||
* Locks are given the generic name "vnode" when created. | * Locks are given the generic name "vnode" when created. | ||||
* Follow the historic practice of using the filesystem | * Follow the historic practice of using the filesystem | ||||
* name when they allocated, e.g., "zfs", "ufs", "nfs, etc. | * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. | ||||
* | * | ||||
* Locks live in a witness group keyed on their name. Thus, | * Locks live in a witness group keyed on their name. Thus, | ||||
Show All 22 Lines | #endif | ||||
/* | /* | ||||
* Finalize various vnode identity bits. | * Finalize various vnode identity bits. | ||||
*/ | */ | ||||
KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); | KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); | ||||
KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); | KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); | ||||
KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); | KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); | ||||
vp->v_type = VNON; | vp->v_type = VNON; | ||||
vp->v_op = vops; | vp->v_op = vops; | ||||
vp->v_irflag = 0; | vp->v_irflag &= VIRF_BUFOBJ; | ||||
v_init_counters(vp); | v_init_counters(vp); | ||||
vn_seqc_init(vp); | vn_seqc_init(vp); | ||||
vp->v_bufobj.bo_ops = &buf_ops_bio; | if (need_bufobj) | ||||
vp2bo(vp)->bo_ops = &buf_ops_bio; | |||||
#ifdef DIAGNOSTIC | #ifdef DIAGNOSTIC | ||||
if (mp == NULL && vops != &dead_vnodeops) | if (mp == NULL && vops != &dead_vnodeops) | ||||
printf("NULL mp in getnewvnode(9), tag %s\n", tag); | printf("NULL mp in getnewvnode(9), tag %s\n", tag); | ||||
#endif | #endif | ||||
#ifdef MAC | #ifdef MAC | ||||
mac_vnode_init(vp); | mac_vnode_init(vp); | ||||
if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) | if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) | ||||
mac_vnode_associate_singlelabel(mp, vp); | mac_vnode_associate_singlelabel(mp, vp); | ||||
#endif | #endif | ||||
if (mp != NULL) { | if (mp != NULL) { | ||||
vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; | if (need_bufobj) | ||||
vp2bo(vp)->bo_bsize = mp->mnt_stat.f_iosize; | |||||
if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) | if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) | ||||
vp->v_vflag |= VV_NOKNOTE; | vp->v_vflag |= VV_NOKNOTE; | ||||
} | } | ||||
/* | /* | ||||
* For the filesystems which do not use vfs_hash_insert(), | * For the filesystems which do not use vfs_hash_insert(), | ||||
* still initialize v_hash to have vfs_hash_index() useful. | * still initialize v_hash to have vfs_hash_index() useful. | ||||
* E.g., nullfs uses vfs_hash_index() on the lower vnode for | * E.g., nullfs uses vfs_hash_index() on the lower vnode for | ||||
* its own hashing. | * its own hashing. | ||||
*/ | */ | ||||
vp->v_hash = (uintptr_t)vp >> vnsz2log; | vp->v_hash = (uintptr_t)vp >> vnsz2log; | ||||
*vpp = vp; | *vpp = vp; | ||||
return (0); | return (0); | ||||
} | } | ||||
void | void | ||||
getnewvnode_reserve(void) | getnewvnode_reserve(struct mount *mp) | ||||
{ | { | ||||
struct thread *td; | struct thread *td; | ||||
td = curthread; | td = curthread; | ||||
MPASS(td->td_vp_reserved == NULL); | MPASS(td->td_vp_reserved == NULL); | ||||
td->td_vp_reserved = vn_alloc(NULL); | td->td_vp_reserved = vn_alloc(NULL, mp != NULL && | ||||
(mp->mnt_kern_flag & MNTK_USES_BCACHE) != 0); | |||||
} | } | ||||
void | void | ||||
getnewvnode_drop_reserve(void) | getnewvnode_drop_reserve(void) | ||||
{ | { | ||||
struct thread *td; | struct thread *td; | ||||
td = curthread; | td = curthread; | ||||
Show All 18 Lines | freevnode(struct vnode *vp) | ||||
* so as not to contaminate the freshly allocated vnode. | * so as not to contaminate the freshly allocated vnode. | ||||
*/ | */ | ||||
CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); | CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); | ||||
/* | /* | ||||
* Paired with vgone. | * Paired with vgone. | ||||
*/ | */ | ||||
vn_seqc_write_end_free(vp); | vn_seqc_write_end_free(vp); | ||||
bo = &vp->v_bufobj; | bo = (vp->v_irflag & VIRF_BUFOBJ) != 0 ? vp2bo(vp) : NULL; | ||||
VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); | VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); | ||||
VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); | VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); | ||||
VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); | VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); | ||||
VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); | VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); | ||||
VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); | VNASSERT(bo == NULL || bo->bo_numoutput == 0, vp, | ||||
VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); | ("Clean vnode has pending I/O's")); | ||||
VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, | VNASSERT(bo == NULL || bo->bo_clean.bv_cnt == 0, vp, | ||||
("cleanbufcnt not 0")); | |||||
VNASSERT(bo == NULL || pctrie_is_empty(&bo->bo_clean.bv_root), vp, | |||||
("clean blk trie not empty")); | ("clean blk trie not empty")); | ||||
VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); | VNASSERT(bo == NULL || bo->bo_dirty.bv_cnt == 0, vp, | ||||
VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, | ("dirtybufcnt not 0")); | ||||
VNASSERT(bo == NULL || pctrie_is_empty(&bo->bo_dirty.bv_root), vp, | |||||
("dirty blk trie not empty")); | ("dirty blk trie not empty")); | ||||
VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); | VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); | ||||
VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); | VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); | ||||
VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); | VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); | ||||
VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, | VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, | ||||
("Dangling rangelock waiters")); | ("Dangling rangelock waiters")); | ||||
VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, | VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, | ||||
("Leaked inactivation")); | ("Leaked inactivation")); | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
#ifdef MAC | #ifdef MAC | ||||
mac_vnode_destroy(vp); | mac_vnode_destroy(vp); | ||||
#endif | #endif | ||||
if (vp->v_pollinfo != NULL) { | if (vp->v_pollinfo != NULL) { | ||||
destroy_vpollinfo(vp->v_pollinfo); | destroy_vpollinfo(vp->v_pollinfo); | ||||
vp->v_pollinfo = NULL; | vp->v_pollinfo = NULL; | ||||
} | } | ||||
vp->v_mountedhere = NULL; | vp->v_mountedhere = NULL; | ||||
vp->v_unpcb = NULL; | vp->v_unpcb = NULL; | ||||
vp->v_rdev = NULL; | vp->v_rdev = NULL; | ||||
vp->v_fifoinfo = NULL; | vp->v_fifoinfo = NULL; | ||||
vp->v_iflag = 0; | vp->v_iflag = 0; | ||||
vp->v_vflag = 0; | vp->v_vflag = 0; | ||||
if (bo != NULL) | |||||
bo->bo_flag = 0; | bo->bo_flag = 0; | ||||
vn_free(vp); | vn_free(vp); | ||||
} | } | ||||
/* | /* | ||||
* Delete from old mount point vnode list, if on one. | * Delete from old mount point vnode list, if on one. | ||||
*/ | */ | ||||
static void | static void | ||||
delmntque(struct vnode *vp) | delmntque(struct vnode *vp) | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | insmntque(struct vnode *vp, struct mount *mp) | ||||
return (insmntque1(vp, mp, insmntque_stddtr, NULL)); | return (insmntque1(vp, mp, insmntque_stddtr, NULL)); | ||||
} | } | ||||
/* | /* | ||||
* Flush out and invalidate all buffers associated with a bufobj | * Flush out and invalidate all buffers associated with a bufobj | ||||
* Called with the underlying object locked. | * Called with the underlying object locked. | ||||
*/ | */ | ||||
int | int | ||||
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) | bufobj_invalbuf(vm_object_t obj, struct bufobj *bo, int flags, int slpflag, | ||||
int slptimeo) | |||||
{ | { | ||||
int error; | int error; | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
if (flags & V_SAVE) { | if (flags & V_SAVE) { | ||||
error = bufobj_wwait(bo, slpflag, slptimeo); | error = bufobj_wwait(bo, slpflag, slptimeo); | ||||
if (error) { | if (error) { | ||||
BO_UNLOCK(bo); | BO_UNLOCK(bo); | ||||
Show All 34 Lines | bufobj_invalbuf(vm_object_t obj, struct bufobj *bo, int flags, int slpflag, | ||||
/* | /* | ||||
* Wait for I/O to complete. XXX needs cleaning up. The vnode can | * Wait for I/O to complete. XXX needs cleaning up. The vnode can | ||||
* have write I/O in-progress but if there is a VM object then the | * have write I/O in-progress but if there is a VM object then the | ||||
* VM object can also have read-I/O in-progress. | * VM object can also have read-I/O in-progress. | ||||
*/ | */ | ||||
do { | do { | ||||
bufobj_wwait(bo, 0, 0); | bufobj_wwait(bo, 0, 0); | ||||
if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { | if ((flags & V_VMIO) == 0 && obj != NULL) { | ||||
BO_UNLOCK(bo); | BO_UNLOCK(bo); | ||||
vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); | vm_object_pip_wait_unlocked(obj, "bovlbx"); | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
} | } | ||||
} while (bo->bo_numoutput > 0); | } while (bo->bo_numoutput > 0); | ||||
BO_UNLOCK(bo); | BO_UNLOCK(bo); | ||||
/* | /* | ||||
* Destroy the copy in the VM cache, too. | * Destroy the copy in the VM cache, too. | ||||
*/ | */ | ||||
if (bo->bo_object != NULL && | if (obj != NULL && | ||||
(flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { | (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { | ||||
VM_OBJECT_WLOCK(bo->bo_object); | VM_OBJECT_WLOCK(obj); | ||||
vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? | vm_object_page_remove(obj, 0, 0, (flags & V_SAVE) ? | ||||
OBJPR_CLEANONLY : 0); | OBJPR_CLEANONLY : 0); | ||||
VM_OBJECT_WUNLOCK(bo->bo_object); | VM_OBJECT_WUNLOCK(obj); | ||||
} | } | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | | if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | | ||||
V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || | V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || | ||||
bo->bo_clean.bv_cnt > 0)) | bo->bo_clean.bv_cnt > 0)) | ||||
panic("vinvalbuf: flush failed"); | panic("vinvalbuf: flush failed"); | ||||
if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && | if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && | ||||
bo->bo_dirty.bv_cnt > 0) | bo->bo_dirty.bv_cnt > 0) | ||||
panic("vinvalbuf: flush dirty failed"); | panic("vinvalbuf: flush dirty failed"); | ||||
BO_UNLOCK(bo); | BO_UNLOCK(bo); | ||||
#endif | #endif | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Flush out and invalidate all buffers associated with a vnode. | * Flush out and invalidate all buffers associated with a vnode. | ||||
* Called with the underlying object locked. | * Called with the underlying object locked. | ||||
*/ | */ | ||||
int | int | ||||
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) | vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) | ||||
{ | { | ||||
vm_object_t obj; | |||||
CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); | CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); | ||||
ASSERT_VOP_LOCKED(vp, "vinvalbuf"); | ASSERT_VOP_LOCKED(vp, "vinvalbuf"); | ||||
mckusick: Seems like a KASSERT that the vnode has its VIRF_BUFOBJ flag set would be useful. | |||||
Done Inline Actionsvp2bo() accessor already has the assert built-in. It should not be checked for the case of nullfs (handle != vp case). So I do not see a need in adding yet another assert. kib: vp2bo() accessor already has the assert built-in. It should not be checked for the case of… | |||||
if (vp->v_object != NULL && vp->v_object->handle != vp) | |||||
obj = vp->v_object; | |||||
if (obj != NULL && obj->handle != vp) | |||||
return (0); | return (0); | ||||
return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); | return (bufobj_invalbuf(obj, vp2bo(vp), flags, slpflag, slptimeo)); | ||||
} | } | ||||
/* | /* | ||||
* Flush out buffers on the specified list. | * Flush out buffers on the specified list. | ||||
* | * | ||||
*/ | */ | ||||
static int | static int | ||||
flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, | flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, | ||||
▲ Show 20 Lines • Show All 125 Lines • ▼ Show 20 Lines | vtruncbuf(struct vnode *vp, off_t length, int blksize) | ||||
/* | /* | ||||
* Round up to the *next* lbn. | * Round up to the *next* lbn. | ||||
*/ | */ | ||||
startlbn = howmany(length, blksize); | startlbn = howmany(length, blksize); | ||||
ASSERT_VOP_LOCKED(vp, "vtruncbuf"); | ASSERT_VOP_LOCKED(vp, "vtruncbuf"); | ||||
bo = &vp->v_bufobj; | bo = vp2bo(vp); | ||||
restart_unlocked: | restart_unlocked: | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) | while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) | ||||
; | ; | ||||
if (length > 0) { | if (length > 0) { | ||||
restartsync: | restartsync: | ||||
Show All 37 Lines | v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, | ||||
struct bufobj *bo; | struct bufobj *bo; | ||||
off_t start, end; | off_t start, end; | ||||
ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); | ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); | ||||
start = blksize * startlbn; | start = blksize * startlbn; | ||||
end = blksize * endlbn; | end = blksize * endlbn; | ||||
bo = &vp->v_bufobj; | bo = vp2bo(vp); | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
MPASS(blksize == bo->bo_bsize); | MPASS(blksize == bo->bo_bsize); | ||||
while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) | while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) | ||||
; | ; | ||||
BO_UNLOCK(bo); | BO_UNLOCK(bo); | ||||
vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); | vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); | ||||
▲ Show 20 Lines • Show All 164 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Associate a buffer with a vnode. | * Associate a buffer with a vnode. | ||||
*/ | */ | ||||
void | void | ||||
bgetvp(struct vnode *vp, struct buf *bp) | bgetvp(struct vnode *vp, struct buf *bp) | ||||
{ | { | ||||
struct bufobj *bo; | struct bufobj *bo; | ||||
bo = &vp->v_bufobj; | bo = vp2bo(vp); | ||||
ASSERT_BO_WLOCKED(bo); | ASSERT_BO_WLOCKED(bo); | ||||
VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); | VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); | ||||
CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); | CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); | ||||
VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, | VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, | ||||
("bgetvp: bp already attached! %p", bp)); | ("bgetvp: bp already attached! %p", bp)); | ||||
vhold(vp); | vhold(vp); | ||||
Show All 16 Lines | brelvp(struct buf *bp) | ||||
CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); | CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); | ||||
KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); | KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); | ||||
/* | /* | ||||
* Delete from old vnode list, if on one. | * Delete from old vnode list, if on one. | ||||
*/ | */ | ||||
vp = bp->b_vp; /* XXX */ | vp = bp->b_vp; /* XXX */ | ||||
bo = bp->b_bufobj; | bo = vp2bo(vp); | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
buf_vlist_remove(bp); | buf_vlist_remove(bp); | ||||
if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { | if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { | ||||
bo->bo_flag &= ~BO_ONWORKLST; | bo->bo_flag &= ~BO_ONWORKLST; | ||||
mtx_lock(&sync_mtx); | mtx_lock(&sync_mtx); | ||||
LIST_REMOVE(bo, bo_synclist); | LIST_REMOVE(bo, bo_synclist); | ||||
syncer_worklist_len--; | syncer_worklist_len--; | ||||
mtx_unlock(&sync_mtx); | mtx_unlock(&sync_mtx); | ||||
▲ Show 20 Lines • Show All 1,427 Lines • ▼ Show 20 Lines | |||||
* vgone, with the vp interlock held. | * vgone, with the vp interlock held. | ||||
*/ | */ | ||||
static void | static void | ||||
vgonel(struct vnode *vp) | vgonel(struct vnode *vp) | ||||
{ | { | ||||
struct thread *td; | struct thread *td; | ||||
struct mount *mp; | struct mount *mp; | ||||
vm_object_t object; | vm_object_t object; | ||||
struct bufobj *bo; | |||||
bool active, doinginact, oweinact; | bool active, doinginact, oweinact; | ||||
ASSERT_VOP_ELOCKED(vp, "vgonel"); | ASSERT_VOP_ELOCKED(vp, "vgonel"); | ||||
ASSERT_VI_LOCKED(vp, "vgonel"); | ASSERT_VI_LOCKED(vp, "vgonel"); | ||||
VNASSERT(vp->v_holdcnt, vp, | VNASSERT(vp->v_holdcnt, vp, | ||||
("vgonel: vp %p has no reference.", vp)); | ("vgonel: vp %p has no reference.", vp)); | ||||
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | ||||
td = curthread; | td = curthread; | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | vgonel(struct vnode *vp) | ||||
if (vp->v_type == VSOCK) | if (vp->v_type == VSOCK) | ||||
vfs_unp_reclaim(vp); | vfs_unp_reclaim(vp); | ||||
/* | /* | ||||
* Clean out any buffers associated with the vnode. | * Clean out any buffers associated with the vnode. | ||||
* If the flush fails, just toss the buffers. | * If the flush fails, just toss the buffers. | ||||
*/ | */ | ||||
mp = NULL; | mp = NULL; | ||||
if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) | |||||
bo = (vp->v_irflag & VIRF_BUFOBJ) != 0 ? vp2bo(vp) : NULL; | |||||
object = vp->v_object; | |||||
if (bo != NULL) { | |||||
if (!TAILQ_EMPTY(&bo->bo_dirty.bv_hd)) | |||||
(void) vn_start_secondary_write(vp, &mp, V_WAIT); | (void) vn_start_secondary_write(vp, &mp, V_WAIT); | ||||
if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { | if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { | ||||
while (vinvalbuf(vp, 0, 0, 0) != 0) | while (vinvalbuf(vp, 0, 0, 0) != 0) | ||||
; | ; | ||||
} | } | ||||
BO_LOCK(&vp->v_bufobj); | BO_LOCK(bo); | ||||
KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && | KASSERT(TAILQ_EMPTY(&bo->bo_dirty.bv_hd) && | ||||
vp->v_bufobj.bo_dirty.bv_cnt == 0 && | bo->bo_dirty.bv_cnt == 0 && | ||||
TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && | TAILQ_EMPTY(&bo->bo_clean.bv_hd) && | ||||
vp->v_bufobj.bo_clean.bv_cnt == 0, | bo->bo_clean.bv_cnt == 0, | ||||
("vp %p bufobj not invalidated", vp)); | ("vp %p bufobj not invalidated", vp)); | ||||
/* | /* | ||||
* For VMIO bufobj, BO_DEAD is set later, or in | * For VMIO bufobj, BO_DEAD is set later, or in | ||||
* vm_object_terminate() after the object's page queue is | * vm_object_terminate() after the object's page queue is | ||||
* flushed. | * flushed. | ||||
*/ | */ | ||||
object = vp->v_bufobj.bo_object; | |||||
if (object == NULL) | if (object == NULL) | ||||
vp->v_bufobj.bo_flag |= BO_DEAD; | bo->bo_flag |= BO_DEAD; | ||||
BO_UNLOCK(&vp->v_bufobj); | BO_UNLOCK(bo); | ||||
} | |||||
/* | /* | ||||
* Handle the VM part. Tmpfs handles v_object on its own (the | * Handle the VM part. Tmpfs handles v_object on its own (the | ||||
* OBJT_VNODE check). Nullfs or other bypassing filesystems | * OBJT_VNODE check). Nullfs or other bypassing filesystems | ||||
* should not touch the object borrowed from the lower vnode | * should not touch the object borrowed from the lower vnode | ||||
* (the handle check). | * (the handle check). | ||||
*/ | */ | ||||
if (object != NULL && object->type == OBJT_VNODE && | if (object != NULL && object->type == OBJT_VNODE && | ||||
Show All 36 Lines | |||||
"VMARKER"}; | "VMARKER"}; | ||||
_Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, | _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, | ||||
"new hold count flag not added to vn_printf"); | "new hold count flag not added to vn_printf"); | ||||
void | void | ||||
vn_printf(struct vnode *vp, const char *fmt, ...) | vn_printf(struct vnode *vp, const char *fmt, ...) | ||||
{ | { | ||||
struct bufobj *bo; | |||||
va_list ap; | va_list ap; | ||||
char buf[256], buf2[16]; | char buf[256], buf2[16]; | ||||
u_long flags; | u_long flags; | ||||
u_int holdcnt; | u_int holdcnt; | ||||
short irflag; | short irflag; | ||||
va_start(ap, fmt); | va_start(ap, fmt); | ||||
vprintf(fmt, ap); | vprintf(fmt, ap); | ||||
Show All 31 Lines | vn_printf(struct vnode *vp, const char *fmt, ...) | ||||
buf[1] = '\0'; | buf[1] = '\0'; | ||||
irflag = vn_irflag_read(vp); | irflag = vn_irflag_read(vp); | ||||
if (irflag & VIRF_DOOMED) | if (irflag & VIRF_DOOMED) | ||||
strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); | strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); | ||||
if (irflag & VIRF_PGREAD) | if (irflag & VIRF_PGREAD) | ||||
strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); | strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); | ||||
if (irflag & VIRF_MOUNTPOINT) | if (irflag & VIRF_MOUNTPOINT) | ||||
strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); | strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); | ||||
flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT); | if (irflag & VIRF_BUFOBJ) | ||||
strlcat(buf, "|VIRF_BUFOBJ", sizeof(buf)); | |||||
flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | | |||||
VIRF_BUFOBJ); | |||||
if (flags != 0) { | if (flags != 0) { | ||||
snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); | snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); | ||||
strlcat(buf, buf2, sizeof(buf)); | strlcat(buf, buf2, sizeof(buf)); | ||||
} | } | ||||
if (vp->v_vflag & VV_ROOT) | if (vp->v_vflag & VV_ROOT) | ||||
strlcat(buf, "|VV_ROOT", sizeof(buf)); | strlcat(buf, "|VV_ROOT", sizeof(buf)); | ||||
if (vp->v_vflag & VV_ISTTY) | if (vp->v_vflag & VV_ISTTY) | ||||
strlcat(buf, "|VV_ISTTY", sizeof(buf)); | strlcat(buf, "|VV_ISTTY", sizeof(buf)); | ||||
▲ Show 20 Lines • Show All 53 Lines • ▼ Show 20 Lines | vn_printf(struct vnode *vp, const char *fmt, ...) | ||||
if (flags != 0) { | if (flags != 0) { | ||||
snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); | snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); | ||||
strlcat(buf, buf2, sizeof(buf)); | strlcat(buf, buf2, sizeof(buf)); | ||||
} | } | ||||
printf(" flags (%s)", buf + 1); | printf(" flags (%s)", buf + 1); | ||||
if (mtx_owned(VI_MTX(vp))) | if (mtx_owned(VI_MTX(vp))) | ||||
printf(" VI_LOCKed"); | printf(" VI_LOCKed"); | ||||
printf("\n"); | printf("\n"); | ||||
if (vp->v_object != NULL) | if (vp->v_object != NULL) { | ||||
printf(" v_object %p ref %d pages %d " | printf(" v_object %p ref %d pages %d", | ||||
"cleanbuf %d dirtybuf %d\n", | |||||
vp->v_object, vp->v_object->ref_count, | vp->v_object, vp->v_object->ref_count, | ||||
vp->v_object->resident_page_count, | vp->v_object->resident_page_count); | ||||
vp->v_bufobj.bo_clean.bv_cnt, | } | ||||
vp->v_bufobj.bo_dirty.bv_cnt); | bo = (vp->v_irflag & VIRF_BUFOBJ) != 0 ? vp2bo(vp) : NULL; | ||||
printf(" "); | if (bo != NULL) { | ||||
printf(" cleanbuf %d dirtybuf %d", | |||||
bo->bo_clean.bv_cnt, bo->bo_dirty.bv_cnt); | |||||
} | |||||
printf("\n "); | |||||
lockmgr_printinfo(vp->v_vnlock); | lockmgr_printinfo(vp->v_vnlock); | ||||
if (vp->v_data != NULL) | if (vp->v_data != NULL) | ||||
VOP_PRINT(vp); | VOP_PRINT(vp); | ||||
} | } | ||||
#ifdef DDB | #ifdef DDB | ||||
/* | /* | ||||
* List all of the locked vnodes in the system. | * List all of the locked vnodes in the system. | ||||
▲ Show 20 Lines • Show All 786 Lines • ▼ Show 20 Lines | if (next == 0 || next > syncer_maxdelay) { | ||||
start /= 2; | start /= 2; | ||||
incr /= 2; | incr /= 2; | ||||
if (start == 0) { | if (start == 0) { | ||||
start = syncer_maxdelay / 2; | start = syncer_maxdelay / 2; | ||||
incr = syncer_maxdelay; | incr = syncer_maxdelay; | ||||
} | } | ||||
next = start; | next = start; | ||||
} | } | ||||
bo = &vp->v_bufobj; | bo = vp2bo(vp); | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); | vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); | ||||
/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ | /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ | ||||
mtx_lock(&sync_mtx); | mtx_lock(&sync_mtx); | ||||
sync_vnode_count++; | sync_vnode_count++; | ||||
if (mp->mnt_syncer == NULL) { | if (mp->mnt_syncer == NULL) { | ||||
mp->mnt_syncer = vp; | mp->mnt_syncer = vp; | ||||
vp = NULL; | vp = NULL; | ||||
Show All 36 Lines | sync_fsync(struct vop_fsync_args *ap) | ||||
* We only need to do something if this is a lazy evaluation. | * We only need to do something if this is a lazy evaluation. | ||||
*/ | */ | ||||
if (ap->a_waitfor != MNT_LAZY) | if (ap->a_waitfor != MNT_LAZY) | ||||
return (0); | return (0); | ||||
/* | /* | ||||
* Move ourselves to the back of the sync list. | * Move ourselves to the back of the sync list. | ||||
*/ | */ | ||||
bo = &syncvp->v_bufobj; | bo = vp2bo(syncvp); | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
vn_syncer_add_to_worklist(bo, syncdelay); | vn_syncer_add_to_worklist(bo, syncdelay); | ||||
BO_UNLOCK(bo); | BO_UNLOCK(bo); | ||||
/* | /* | ||||
* Walk the list of vnodes pushing all that are dirty and | * Walk the list of vnodes pushing all that are dirty and | ||||
* not already on the sync list. | * not already on the sync list. | ||||
*/ | */ | ||||
Show All 33 Lines | |||||
* Modifications to the worklist must be protected by sync_mtx. | * Modifications to the worklist must be protected by sync_mtx. | ||||
*/ | */ | ||||
static int | static int | ||||
sync_reclaim(struct vop_reclaim_args *ap) | sync_reclaim(struct vop_reclaim_args *ap) | ||||
{ | { | ||||
struct vnode *vp = ap->a_vp; | struct vnode *vp = ap->a_vp; | ||||
struct bufobj *bo; | struct bufobj *bo; | ||||
bo = &vp->v_bufobj; | bo = vp2bo(vp); | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
mtx_lock(&sync_mtx); | mtx_lock(&sync_mtx); | ||||
if (vp->v_mount->mnt_syncer == vp) | if (vp->v_mount->mnt_syncer == vp) | ||||
vp->v_mount->mnt_syncer = NULL; | vp->v_mount->mnt_syncer = NULL; | ||||
if (bo->bo_flag & BO_ONWORKLST) { | if (bo->bo_flag & BO_ONWORKLST) { | ||||
LIST_REMOVE(bo, bo_synclist); | LIST_REMOVE(bo, bo_synclist); | ||||
syncer_worklist_len--; | syncer_worklist_len--; | ||||
sync_vnode_count--; | sync_vnode_count--; | ||||
▲ Show 20 Lines • Show All 1,862 Lines • Show Last 20 Lines |
A KASSERT that the vnode has VIRF_BUFOBJ set — e.g. KASSERT((vn_irflag_read(vp) & VIRF_BUFOBJ) != 0, ("vp %p lacks bufobj", vp)) — before each vp2bo() call would be useful, to catch callers that pass a vnode without an embedded bufobj.