Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/vfs_subr.c
Show First 20 Lines • Show All 658 Lines • ▼ Show 20 Lines | vntblinit(void *dummy __unused) | ||||
mtx_lock(&vnode_list_mtx); | mtx_lock(&vnode_list_mtx); | ||||
vnlru_recalc(); | vnlru_recalc(); | ||||
mtx_unlock(&vnode_list_mtx); | mtx_unlock(&vnode_list_mtx); | ||||
vnode_list_free_marker = vn_alloc_marker(NULL); | vnode_list_free_marker = vn_alloc_marker(NULL); | ||||
TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); | TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); | ||||
vnode_list_reclaim_marker = vn_alloc_marker(NULL); | vnode_list_reclaim_marker = vn_alloc_marker(NULL); | ||||
TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); | TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); | ||||
vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, | vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, | ||||
vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_SMR); | vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); | ||||
vfs_smr = uma_zone_get_smr(vnode_zone); | uma_zone_set_smr(vnode_zone, vfs_smr); | ||||
vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), | vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), | ||||
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); | NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); | ||||
/* | /* | ||||
* Preallocate enough nodes to support one-per buf so that | * Preallocate enough nodes to support one-per buf so that | ||||
* we can not fail an insert. reassignbuf() callers can not | * we can not fail an insert. reassignbuf() callers can not | ||||
* tolerate the insertion failure. | * tolerate the insertion failure. | ||||
*/ | */ | ||||
buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), | buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), | ||||
▲ Show 20 Lines • Show All 2,208 Lines • ▼ Show 20 Lines | if (refcount_acquire_if_not_zero(&vp->v_usecount)) { | ||||
vs = VGET_USECOUNT; | vs = VGET_USECOUNT; | ||||
} else { | } else { | ||||
vhold(vp); | vhold(vp); | ||||
vs = VGET_HOLDCNT; | vs = VGET_HOLDCNT; | ||||
} | } | ||||
return (vs); | return (vs); | ||||
} | } | ||||
void | |||||
vget_abort(struct vnode *vp, enum vgetstate vs) | |||||
{ | |||||
switch (vs) { | |||||
case VGET_USECOUNT: | |||||
vrele(vp); | |||||
break; | |||||
case VGET_HOLDCNT: | |||||
vdrop(vp); | |||||
default: | |||||
__assert_unreachable(); | |||||
} | |||||
} | |||||
int | int | ||||
vget(struct vnode *vp, int flags, struct thread *td) | vget(struct vnode *vp, int flags, struct thread *td) | ||||
{ | { | ||||
enum vgetstate vs; | enum vgetstate vs; | ||||
MPASS(td == curthread); | MPASS(td == curthread); | ||||
vs = vget_prep(vp); | vs = vget_prep(vp); | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | vget_finish(struct vnode *vp, int flags, enum vgetstate vs) | ||||
else | else | ||||
ASSERT_VI_UNLOCKED(vp, __func__); | ASSERT_VI_UNLOCKED(vp, __func__); | ||||
VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); | VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); | ||||
VNPASS(vp->v_holdcnt > 0, vp); | VNPASS(vp->v_holdcnt > 0, vp); | ||||
VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); | VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); | ||||
error = vn_lock(vp, flags); | error = vn_lock(vp, flags); | ||||
if (__predict_false(error != 0)) { | if (__predict_false(error != 0)) { | ||||
if (vs == VGET_USECOUNT) | vget_abort(vp, vs); | ||||
vrele(vp); | |||||
else | |||||
vdrop(vp); | |||||
CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, | CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, | ||||
vp); | vp); | ||||
return (error); | return (error); | ||||
} | } | ||||
if (vs == VGET_USECOUNT) | if (vs == VGET_USECOUNT) | ||||
return (0); | return (0); | ||||
▲ Show 20 Lines • Show All 61 Lines • ▼ Show 20 Lines | vref_vchr(struct vnode *vp, bool interlock) | ||||
vhold(vp); | vhold(vp); | ||||
v_incr_devcount(vp); | v_incr_devcount(vp); | ||||
refcount_acquire(&vp->v_usecount); | refcount_acquire(&vp->v_usecount); | ||||
if (!interlock) | if (!interlock) | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
return; | return; | ||||
} | } | ||||
bool | |||||
vref_smr(struct vnode *vp) | |||||
{ | |||||
int old; | |||||
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | |||||
VFS_SMR_ASSERT_ENTERED(); | |||||
/* | |||||
* Devices are not supported since they may require taking the interlock. | |||||
*/ | |||||
VNPASS(vp->v_type != VCHR, vp); | |||||
if (refcount_acquire_if_not_zero(&vp->v_usecount)) { | |||||
VNODE_REFCOUNT_FENCE_ACQ(); | |||||
VNPASS(vp->v_holdcnt > 0, vp); | |||||
return (true); | |||||
} | |||||
if (!vhold_smr(vp)) | |||||
return (false); | |||||
/* | |||||
* See the comment in vget_finish. | |||||
*/ | |||||
old = atomic_fetchadd_int(&vp->v_usecount, 1); | |||||
VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); | |||||
if (old != 0) { | |||||
#ifdef INVARIANTS | |||||
old = atomic_fetchadd_int(&vp->v_holdcnt, -1); | |||||
VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); | |||||
#else | |||||
refcount_release(&vp->v_holdcnt); | |||||
#endif | |||||
} | |||||
kib: Why not extract this block from there and vget_finish() into a common function ? | |||||
Done Inline Actionsok, will update later mjg: ok, will update later | |||||
Done Inline ActionsSo I don't know how to name this. In the patchset I introduce vget_finish_ref later to facilitate WANTPARENT/!LOCKLEAF lookups, but I'm not going to call something like that from vref. mjg: So I don't know how to name this. In the patchset I introduce vget_finish_ref later to… | |||||
Not Done Inline ActionsI would call it vref_hold() or vget_ref_hold(). This name describes the action of the code. kib: I would call it vref_hold() or vget_ref_hold(). This name describes the action of the code. | |||||
Done Inline ActionsI reworked the only consumer of vref_smr to use vget direclty instead, thus getting rid of the problem for the time being. mjg: I reworked the only consumer of vref_smr to use vget direclty instead, thus getting rid of the… | |||||
return (true); | |||||
} | |||||
void | void | ||||
vref(struct vnode *vp) | vref(struct vnode *vp) | ||||
{ | { | ||||
int old; | int old; | ||||
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | ||||
if (__predict_false(vp->v_type == VCHR)) { | if (__predict_false(vp->v_type == VCHR)) { | ||||
vref_vchr(vp, false); | vref_vchr(vp, false); | ||||
▲ Show 20 Lines • Show All 1,349 Lines • ▼ Show 20 Lines | #define MNT_KERN_FLAG(flag) do { \ | ||||
MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); | MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); | ||||
MNT_KERN_FLAG(MNTK_SHARED_WRITES); | MNT_KERN_FLAG(MNTK_SHARED_WRITES); | ||||
MNT_KERN_FLAG(MNTK_NO_IOPF); | MNT_KERN_FLAG(MNTK_NO_IOPF); | ||||
MNT_KERN_FLAG(MNTK_VGONE_UPPER); | MNT_KERN_FLAG(MNTK_VGONE_UPPER); | ||||
MNT_KERN_FLAG(MNTK_VGONE_WAITER); | MNT_KERN_FLAG(MNTK_VGONE_WAITER); | ||||
MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); | MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); | ||||
MNT_KERN_FLAG(MNTK_MARKER); | MNT_KERN_FLAG(MNTK_MARKER); | ||||
MNT_KERN_FLAG(MNTK_USES_BCACHE); | MNT_KERN_FLAG(MNTK_USES_BCACHE); | ||||
MNT_KERN_FLAG(MNTK_FPLOOKUP); | |||||
MNT_KERN_FLAG(MNTK_NOASYNC); | MNT_KERN_FLAG(MNTK_NOASYNC); | ||||
MNT_KERN_FLAG(MNTK_UNMOUNT); | MNT_KERN_FLAG(MNTK_UNMOUNT); | ||||
MNT_KERN_FLAG(MNTK_MWAIT); | MNT_KERN_FLAG(MNTK_MWAIT); | ||||
MNT_KERN_FLAG(MNTK_SUSPEND); | MNT_KERN_FLAG(MNTK_SUSPEND); | ||||
MNT_KERN_FLAG(MNTK_SUSPEND2); | MNT_KERN_FLAG(MNTK_SUSPEND2); | ||||
MNT_KERN_FLAG(MNTK_SUSPENDED); | MNT_KERN_FLAG(MNTK_SUSPENDED); | ||||
MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); | MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); | ||||
MNT_KERN_FLAG(MNTK_NOKNOTE); | MNT_KERN_FLAG(MNTK_NOKNOTE); | ||||
▲ Show 20 Lines • Show All 800 Lines • ▼ Show 20 Lines | vn_isdisk(struct vnode *vp, int *errp) | ||||
dev_unlock(); | dev_unlock(); | ||||
out: | out: | ||||
if (errp != NULL) | if (errp != NULL) | ||||
*errp = error; | *errp = error; | ||||
return (error == 0); | return (error == 0); | ||||
} | } | ||||
/* | /* | ||||
* VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see | |||||
* the comment above cache_fplookup for details. | |||||
* | |||||
* We never deny as priv_check_cred calls are not yet supported, see vaccess. | |||||
*/ | |||||
int | |||||
vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) | |||||
{ | |||||
VFS_SMR_ASSERT_ENTERED(); | |||||
/* Check the owner. */ | |||||
if (cred->cr_uid == file_uid) { | |||||
if (file_mode & S_IXUSR) | |||||
return (0); | |||||
return (EAGAIN); | |||||
} | |||||
/* Otherwise, check the groups (first match) */ | |||||
if (groupmember(file_gid, cred)) { | |||||
if (file_mode & S_IXGRP) | |||||
return (0); | |||||
return (EAGAIN); | |||||
} | |||||
/* Otherwise, check everyone else. */ | |||||
if (file_mode & S_IXOTH) | |||||
return (0); | |||||
return (EAGAIN); | |||||
} | |||||
/* | |||||
* Common filesystem object access control check routine. Accepts a | * Common filesystem object access control check routine. Accepts a | ||||
* vnode's type, "mode", uid and gid, requested access mode, credentials, | * vnode's type, "mode", uid and gid, requested access mode, credentials, | ||||
* and optional call-by-reference privused argument allowing vaccess() | * and optional call-by-reference privused argument allowing vaccess() | ||||
* to indicate to the caller whether privilege was used to satisfy the | * to indicate to the caller whether privilege was used to satisfy the | ||||
* request (obsoleted). Returns 0 on success, or an errno on failure. | * request (obsoleted). Returns 0 on success, or an errno on failure. | ||||
*/ | */ | ||||
int | int | ||||
vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, | vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, | ||||
▲ Show 20 Lines • Show All 281 Lines • ▼ Show 20 Lines | #endif | ||||
if (a->a_tvp != a->a_fvp) | if (a->a_tvp != a->a_fvp) | ||||
vhold(a->a_fvp); | vhold(a->a_fvp); | ||||
vhold(a->a_tdvp); | vhold(a->a_tdvp); | ||||
if (a->a_tvp) | if (a->a_tvp) | ||||
vhold(a->a_tvp); | vhold(a->a_tvp); | ||||
} | } | ||||
#ifdef DEBUG_VFS_LOCKS | #ifdef DEBUG_VFS_LOCKS | ||||
/*
 * Assert that VOP_FPLOOKUP_VEXEC is only entered from within an SMR
 * section; the lockless lookup contract forbids blocking here.
 */
void
vop_fplookup_vexec_pre(void *ap __unused)
{

	VFS_SMR_ASSERT_ENTERED();
}
/*
 * Assert that the filesystem's VOP_FPLOOKUP_VEXEC implementation did not
 * leave the SMR section it was entered in.
 */
void
vop_fplookup_vexec_post(void *ap __unused, int rc __unused)
{

	VFS_SMR_ASSERT_ENTERED();
}
void | void | ||||
vop_strategy_pre(void *ap) | vop_strategy_pre(void *ap) | ||||
{ | { | ||||
struct vop_strategy_args *a; | struct vop_strategy_args *a; | ||||
struct buf *bp; | struct buf *bp; | ||||
a = ap; | a = ap; | ||||
bp = a->a_bp; | bp = a->a_bp; | ||||
▲ Show 20 Lines • Show All 1,162 Lines • Show Last 20 Lines |
Why not extract this block from there and vget_finish() into a common function?