sys/kern/vfs_subr.c
[... first 70 lines not shown ...]
#include <sys/namei.h> | #include <sys/namei.h>
#include <sys/pctrie.h> | #include <sys/pctrie.h>
#include <sys/priv.h> | #include <sys/priv.h>
#include <sys/reboot.h> | #include <sys/reboot.h>
#include <sys/refcount.h> | #include <sys/refcount.h>
#include <sys/rwlock.h> | #include <sys/rwlock.h>
#include <sys/sched.h> | #include <sys/sched.h>
#include <sys/sleepqueue.h> | #include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h> | #include <sys/smp.h>
#include <sys/stat.h> | #include <sys/stat.h>
#include <sys/sysctl.h> | #include <sys/sysctl.h>
#include <sys/syslog.h> | #include <sys/syslog.h>
#include <sys/vmmeter.h> | #include <sys/vmmeter.h>
#include <sys/vnode.h> | #include <sys/vnode.h>
#include <sys/watchdog.h> | #include <sys/watchdog.h>
[... 146 lines not shown ...]
struct nfs_public nfs_pub; | struct nfs_public nfs_pub;
static uma_zone_t buf_trie_zone; | static uma_zone_t buf_trie_zone;
/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ | /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone; | static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone; | static uma_zone_t vnodepoll_zone;
__read_frequently smr_t vfs_smr;
/* | /*
* The workitem queue. | * The workitem queue.
* | *
* It is useful to delay writes of file data and filesystem metadata | * It is useful to delay writes of file data and filesystem metadata
* for tens of seconds so that quickly created and deleted files need | * for tens of seconds so that quickly created and deleted files need
* not waste disk bandwidth being created and removed. To realize this, | * not waste disk bandwidth being created and removed. To realize this,
* we append vnodes to a "workitem" queue. When running with a soft | * we append vnodes to a "workitem" queue. When running with a soft
* updates implementation, most pending metadata dependencies should | * updates implementation, most pending metadata dependencies should
[... 407 lines not shown; in vntblinit(void *dummy __unused) ...]
mtx_lock(&vnode_list_mtx); | mtx_lock(&vnode_list_mtx);
vnlru_recalc(); | vnlru_recalc();
mtx_unlock(&vnode_list_mtx); | mtx_unlock(&vnode_list_mtx);
vnode_list_free_marker = vn_alloc_marker(NULL); | vnode_list_free_marker = vn_alloc_marker(NULL);
TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); | TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
vnode_list_reclaim_marker = vn_alloc_marker(NULL); | vnode_list_reclaim_marker = vn_alloc_marker(NULL);
TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); | TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);
vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, | vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); | vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_SMR);
vfs_smr = uma_zone_get_smr(vnode_zone);
jeff: If you have ordering issues in later patches you can just manually smr_create() first thing and pass it around everywhere even here.
mjg: I know, but I did not find a good place to do it yet. It's plausible I'll be able to reorder the problematic consumer. Another option is that perhaps there should be a well-defined "allocate global smrs here" place so that other work does not run into equivalent woes.
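For reference, a minimal sketch of the ordering jeff suggests: create the SMR structure at one well-defined early point and attach it to the zone, instead of pulling it out with uma_zone_get_smr(). The SYSINIT placement, the smr_create() argument list (it has varied across branches), and the availability of uma_zone_set_smr() are assumptions here, not part of this patch:

#include <sys/smr.h>

__read_frequently smr_t vfs_smr;

static void
vfs_smr_init(void *dummy __unused)
{
	/* Assumed three-argument smr_create(); older trees take only the name. */
	vfs_smr = smr_create("vfs", 0, 0);
}
/* Hypothetical placement: must run before any vfs_smr consumer. */
SYSINIT(vfs_smr_init, SI_SUB_VM, SI_ORDER_ANY, vfs_smr_init, NULL);

vntblinit() would then keep UMA_ZONE_SMR on the zone and call uma_zone_set_smr(vnode_zone, vfs_smr) instead of uma_zone_get_smr(), which would also give mjg's "allocate global smrs here" place a natural home.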
vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), | vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); | NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
/* | /*
* Preallocate enough nodes to support one-per buf so that | * Preallocate enough nodes to support one-per buf so that
* we can not fail an insert. reassignbuf() callers can not | * we can not fail an insert. reassignbuf() callers can not
* tolerate the insertion failure. | * tolerate the insertion failure.
*/ | */
buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), | buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
[... 925 lines not shown ...]
if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
vnlru_read_freevnodes() > 1) | vnlru_read_freevnodes() > 1)
vnlru_free_locked(1, NULL); | vnlru_free_locked(1, NULL);
} | }
alloc: | alloc:
rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; | rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
if (vnlru_under(rnumvnodes, vlowat)) | if (vnlru_under(rnumvnodes, vlowat))
vnlru_kick(); | vnlru_kick();
mtx_unlock(&vnode_list_mtx); | mtx_unlock(&vnode_list_mtx);
return (uma_zalloc(vnode_zone, M_WAITOK)); | return (uma_zalloc_smr(vnode_zone, M_WAITOK));
} | }
static struct vnode * | static struct vnode *
vn_alloc(struct mount *mp) | vn_alloc(struct mount *mp)
{ | {
u_long rnumvnodes; | u_long rnumvnodes;
if (__predict_false(vn_alloc_cyclecount != 0)) | if (__predict_false(vn_alloc_cyclecount != 0))
return (vn_alloc_hard(mp)); | return (vn_alloc_hard(mp));
rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; | rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { | if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
atomic_subtract_long(&numvnodes, 1); | atomic_subtract_long(&numvnodes, 1);
return (vn_alloc_hard(mp)); | return (vn_alloc_hard(mp));
} | }
return (uma_zalloc(vnode_zone, M_WAITOK)); | return (uma_zalloc_smr(vnode_zone, M_WAITOK));
} | }
static void | static void
vn_free(struct vnode *vp) | vn_free(struct vnode *vp)
{ | {
atomic_subtract_long(&numvnodes, 1); | atomic_subtract_long(&numvnodes, 1);
uma_zfree(vnode_zone, vp); | uma_zfree_smr(vnode_zone, vp);
} | }
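The switch to the _smr allocator pair above is what buys lockless readers their safety: memory passed to uma_zfree_smr() is not handed out again until every thread that was inside an SMR read section at the time of the free has left it. A rough reader-side sketch of the resulting contract, assuming the vfs_smr_enter()/vfs_smr_exit() wrappers implied by VFS_SMR_ASSERT_ENTERED() in this patch, with a hypothetical lockless lookup helper:

	struct vnode *vp;

	vfs_smr_enter();			/* pin the current SMR epoch */
	vp = lookup_vnode_unlocked(key);	/* hypothetical; may race with vn_free() */
	if (vp != NULL && !vhold_smr(vp))
		vp = NULL;			/* lost the race; the vnode is being freed */
	vfs_smr_exit();				/* after this, vp's memory may be reused */

The vnode may be stale by the time the reader inspects it, but it is guaranteed to still be a vnode, which is exactly what vhold_smr() (further down) relies on.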
/* | /*
* Return the next vnode from the free list. | * Return the next vnode from the free list.
*/ | */
int | int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, | getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
struct vnode **vpp) | struct vnode **vpp)
[... 114 lines not shown; in freevnode(struct vnode *vp) ...]
* normally remain until it is needed for another vnode. We | * normally remain until it is needed for another vnode. We
* need to cleanup (or verify that the cleanup has already | * need to cleanup (or verify that the cleanup has already
* been done) any residual data left from its current use | * been done) any residual data left from its current use
* so as not to contaminate the freshly allocated vnode. | * so as not to contaminate the freshly allocated vnode.
*/ | */
CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); | CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
bo = &vp->v_bufobj; | bo = &vp->v_bufobj;
VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); | VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); | VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); | VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); | VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); | VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); | VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, | VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
("clean blk trie not empty")); | ("clean blk trie not empty"));
VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); | VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, | VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
[... 1,073 lines not shown ...]
* is being destroyed. Only callers who specify LK_RETRY will | * is being destroyed. Only callers who specify LK_RETRY will
* see doomed vnodes. If inactive processing was delayed in | * see doomed vnodes. If inactive processing was delayed in
* vput try to do it here. | * vput try to do it here.
* | *
* usecount is manipulated using atomics without holding any locks. | * usecount is manipulated using atomics without holding any locks.
* | *
* holdcnt can be manipulated using atomics without holding any locks, | * holdcnt can be manipulated using atomics without holding any locks,
* except when transitioning 1<->0, in which case the interlock is held. | * except when transitioning 1<->0, in which case the interlock is held.
*
* Consumers which don't guarantee liveness of the vnode can use SMR to
* try to get a reference. Note this operation can fail since the vnode
* may already be on its way to being freed by the time they get to it.
*/ | */
enum vgetstate | enum vgetstate
vget_prep_smr(struct vnode *vp)
{
enum vgetstate vs;
VFS_SMR_ASSERT_ENTERED();
if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
vs = VGET_USECOUNT;
} else {
if (vhold_smr(vp))
vs = VGET_HOLDCNT;
else
vs = VGET_NONE;
}
return (vs);
}
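A sketch of the intended calling sequence for vget_prep_smr(), with the lockless lookup step as a hypothetical stand-in (the name cache is the expected real consumer):

	struct vnode *vp;
	enum vgetstate vs;
	int error;

	vfs_smr_enter();
	vp = lookup_unlocked(dvp, cnp);	/* hypothetical; no locks or references held */
	if (vp == NULL) {
		vfs_smr_exit();
		return (ENOENT);
	}
	vs = vget_prep_smr(vp);
	vfs_smr_exit();			/* any reference taken now pins the vnode */
	if (vs == VGET_NONE)
		return (EAGAIN);	/* vnode was being freed; retry the locked path */
	error = vget_finish(vp, LK_SHARED, vs);

Note that vget_finish() asserts vs != VGET_NONE, so the caller has to check for that outcome first; since VGET_NONE means no reference was taken, there is nothing to release on that path.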
enum vgetstate
vget_prep(struct vnode *vp) | vget_prep(struct vnode *vp)
{ | {
enum vgetstate vs; | enum vgetstate vs;
if (refcount_acquire_if_not_zero(&vp->v_usecount)) { | if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
vs = VGET_USECOUNT; | vs = VGET_USECOUNT;
} else { | } else {
vhold(vp); | vhold(vp);
[... 53 lines not shown ...]
vget_finish(struct vnode *vp, int flags, enum vgetstate vs) | vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
{ | {
int error, old; | int error, old;
if ((flags & LK_INTERLOCK) != 0) | if ((flags & LK_INTERLOCK) != 0)
ASSERT_VI_LOCKED(vp, __func__); | ASSERT_VI_LOCKED(vp, __func__);
else | else
ASSERT_VI_UNLOCKED(vp, __func__); | ASSERT_VI_UNLOCKED(vp, __func__);
VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
VNPASS(vp->v_holdcnt > 0, vp); | VNPASS(vp->v_holdcnt > 0, vp);
VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); | VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
error = vn_lock(vp, flags); | error = vn_lock(vp, flags);
if (__predict_false(error != 0)) { | if (__predict_false(error != 0)) {
if (vs == VGET_USECOUNT) | if (vs == VGET_USECOUNT)
vrele(vp); | vrele(vp);
else | else
[... 445 lines not shown ...]
void | void
vhold(struct vnode *vp) | vhold(struct vnode *vp)
{ | {
struct vdbatch *vd; | struct vdbatch *vd;
int old; | int old;
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
old = atomic_fetchadd_int(&vp->v_holdcnt, 1); | old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
VNASSERT(old >= 0, vp, ("%s: wrong hold count %d", __func__, old)); | VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
("%s: wrong hold count %d", __func__, old));
if (old != 0) | if (old != 0)
return; | return;
critical_enter(); | critical_enter();
vd = DPCPU_PTR(vd); | vd = DPCPU_PTR(vd);
vd->freevnodes--; | vd->freevnodes--;
critical_exit(); | critical_exit();
} | }
void | void
vholdl(struct vnode *vp) | vholdl(struct vnode *vp)
{ | {
ASSERT_VI_LOCKED(vp, __func__); | ASSERT_VI_LOCKED(vp, __func__);
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
vhold(vp); | vhold(vp);
} | }
void | void
vholdnz(struct vnode *vp) | vholdnz(struct vnode *vp)
{ | {
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
#ifdef INVARIANTS | #ifdef INVARIANTS
int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); | int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old)); | VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
("%s: wrong hold count %d", __func__, old));
#else | #else
atomic_add_int(&vp->v_holdcnt, 1); | atomic_add_int(&vp->v_holdcnt, 1);
#endif | #endif
} | }
/*
* Grab a hold count as long as the vnode is not being freed.
*
* Only use this routine if vfs smr is the only protection you have against
* freeing the vnode.
jeff: I had to solve this same problem for page busy. I did it slightly differently but related. This is VPB_FREED. The notion is, the owner that is freeing has destroyed the page identity, and sets busy to VPB_FREED. The trybusy on the lockless side may succeed right before the owner sets FREED. The lockless lookup revalidates the page identity and unlocks if it races. The unlock is special because it handles FREED. We know that if we successfully acquire it, it can not be freed.
mjg: I noted what I think is an equivalent in the description.
1. the code as implemented is imo…
To elaborate on the last point, when profiling things like the -j 104 bzImage incremental I kept seeing vhold_smr on the profile (despite it being only done on the terminal vnode). Note its performance is hindered by the fact that this is a cmpset loop in the first place. After the dust settles we can take a look at changing it into fetchadd, but this is only possible if fetchadding cannot legally alter any flags.
*/
bool
vhold_smr(struct vnode *vp)
{
int count;
VFS_SMR_ASSERT_ENTERED();
count = atomic_load_int(&vp->v_holdcnt);
for (;;) {
if (count & VHOLD_NO_SMR) {
VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
("non-zero hold count with flags %d\n", count));
return (false);
}
VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1))
return (true);
}
}
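The protocol is small enough to model in userspace. A self-contained C11 sketch of the two competing transitions (the flag value and all names here are assumptions for illustration, not the kernel's):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define VHOLD_NO_SMR	0x80000000u	/* assumed flag encoding */

	static _Atomic unsigned int holdcnt;

	/* Models vhold_smr(): n -> n + 1, refused once VHOLD_NO_SMR is set. */
	static bool
	model_vhold_smr(void)
	{
		unsigned int c = atomic_load(&holdcnt);

		for (;;) {
			if (c & VHOLD_NO_SMR)
				return (false);	/* the freeing side already won */
			/* Like atomic_fcmpset_int(): c is reloaded on failure. */
			if (atomic_compare_exchange_weak(&holdcnt, &c, c + 1))
				return (true);
		}
	}

	/* Models the vdropl() tail: 0 -> VHOLD_NO_SMR, fails if a reader snuck in. */
	static bool
	model_vdropl_claim(void)
	{
		unsigned int zero = 0;

		return (atomic_compare_exchange_strong(&holdcnt, &zero,
		    VHOLD_NO_SMR));
	}

	int
	main(void)
	{
		if (model_vdropl_claim())
			printf("freeing side won: would call freevnode()\n");
		/* Any acquire attempt after the claim must now fail. */
		printf("vhold_smr after claim: %s\n",
		    model_vhold_smr() ? "acquired (bug)" : "refused (correct)");
		return (0);
	}

This also illustrates why mjg's fetchadd idea is delicate: a plain fetchadd would blindly increment even after VHOLD_NO_SMR is set, handing out a hold on a vnode already committed to freevnode(); the cmpset loop is what prevents that.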
static void __noinline | static void __noinline
vdbatch_process(struct vdbatch *vd) | vdbatch_process(struct vdbatch *vd)
{ | {
struct vnode *vp; | struct vnode *vp;
int i; | int i;
mtx_assert(&vd->lock, MA_OWNED); | mtx_assert(&vd->lock, MA_OWNED);
MPASS(curthread->td_pinned > 0); | MPASS(curthread->td_pinned > 0);
[... 154 lines not shown ...]
{ | {
ASSERT_VI_LOCKED(vp, __func__); | ASSERT_VI_LOCKED(vp, __func__);
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
if (!refcount_release(&vp->v_holdcnt)) { | if (!refcount_release(&vp->v_holdcnt)) {
VI_UNLOCK(vp); | VI_UNLOCK(vp);
return; | return;
} | }
if (VN_IS_DOOMED(vp)) { | if (!VN_IS_DOOMED(vp)) {
freevnode(vp); | vdrop_deactivate(vp);
return; | return;
} | }
vdrop_deactivate(vp); | /*
* We may be racing against vhold_smr.
*
* If they win we can just pretend we never got this far; they will
* vdrop later.
*/
if (!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR)) {
/*
* We lost the aforementioned race. Note that any subsequent
* access is invalid as they might have managed to vdropl on
* their own.
*/
return;
} | }
freevnode(vp);
}
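Spelled out, the race at the final-reference boundary resolves in exactly one of two ways (an illustrative interleaving; the column headings are ours):

	vdropl() (holdcnt just hit 0)         vhold_smr() (inside vfs_smr section)
	----------------------------------    ------------------------------------
	cmpset(0 -> VHOLD_NO_SMR) succeeds
	                                      sees VHOLD_NO_SMR, returns false
	freevnode(vp)

	                                      fcmpset(0 -> 1) succeeds
	cmpset(0 -> VHOLD_NO_SMR) fails,
	returns without touching vp           winner owns a hold and will vdropl()
	                                      later, re-running this path

Either way exactly one side proceeds toward freevnode(), and a reader that loses never touches the vnode again.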
/* | /*
* Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT | * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
* flags. DOINGINACT prevents us from recursing in calls to vinactive. | * flags. DOINGINACT prevents us from recursing in calls to vinactive.
*/ | */
static void | static void
vinactivef(struct vnode *vp) | vinactivef(struct vnode *vp)
{ | {
[... 438 lines not shown ...]
/* | /*
* Print out a description of a vnode. | * Print out a description of a vnode.
*/ | */
static const char * const typename[] = | static const char * const typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", | {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
"VMARKER"}; | "VMARKER"};
_Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0,
"new hold count flag not added to vn_printf");
void | void
vn_printf(struct vnode *vp, const char *fmt, ...) | vn_printf(struct vnode *vp, const char *fmt, ...)
{ | {
va_list ap; | va_list ap;
char buf[256], buf2[16]; | char buf[256], buf2[16];
u_long flags; | u_long flags;
u_int holdcnt;
va_start(ap, fmt); | va_start(ap, fmt);
vprintf(fmt, ap); | vprintf(fmt, ap);
va_end(ap); | va_end(ap);
printf("%p: ", (void *)vp); | printf("%p: ", (void *)vp);
printf("type %s\n", typename[vp->v_type]); | printf("type %s\n", typename[vp->v_type]);
holdcnt = atomic_load_int(&vp->v_holdcnt);
printf(" usecount %d, writecount %d, refcount %d", | printf(" usecount %d, writecount %d, refcount %d",
vp->v_usecount, vp->v_writecount, vp->v_holdcnt); | vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS);
switch (vp->v_type) { | switch (vp->v_type) {
case VDIR: | case VDIR:
printf(" mountedhere %p\n", vp->v_mountedhere); | printf(" mountedhere %p\n", vp->v_mountedhere);
break; | break;
case VCHR: | case VCHR:
printf(" rdev %p\n", vp->v_rdev); | printf(" rdev %p\n", vp->v_rdev);
break; | break;
case VSOCK: | case VSOCK:
printf(" socket %p\n", vp->v_unpcb); | printf(" socket %p\n", vp->v_unpcb);
break; | break;
case VFIFO: | case VFIFO:
printf(" fifoinfo %p\n", vp->v_fifoinfo); | printf(" fifoinfo %p\n", vp->v_fifoinfo);
break; | break;
default: | default:
printf("\n"); | printf("\n");
break; | break;
} | }
buf[0] = '\0';
buf[1] = '\0';
if (holdcnt & VHOLD_NO_SMR)
strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf));
printf(" hold count flags (%s)\n", buf + 1);
buf[0] = '\0'; | buf[0] = '\0';
buf[1] = '\0'; | buf[1] = '\0';
if (vp->v_irflag & VIRF_DOOMED) | if (vp->v_irflag & VIRF_DOOMED)
strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); | strlcat(buf, "|VIRF_DOOMED", sizeof(buf));
flags = vp->v_irflag & ~(VIRF_DOOMED); | flags = vp->v_irflag & ~(VIRF_DOOMED);
if (flags != 0) { | if (flags != 0) {
snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); | snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
strlcat(buf, buf2, sizeof(buf)); | strlcat(buf, buf2, sizeof(buf));
[... 2,384 lines not shown ...]