Changeset View
Standalone View
sys/fs/devfs/devfs_vnops.c
Show First 20 Lines • Show All 216 Lines • ▼ Show 20 Lines | devfs_clear_cdevpriv(void) | ||||
struct file *fp; | struct file *fp; | ||||
fp = curthread->td_fpop; | fp = curthread->td_fpop; | ||||
if (fp == NULL) | if (fp == NULL) | ||||
return; | return; | ||||
devfs_fpdrop(fp); | devfs_fpdrop(fp); | ||||
} | } | ||||
/*
 * Account the first open of this devfs vnode against the underlying
 * cdev: bump dev->si_usecount once, recording the transition in the
 * directory entry via the DE2_OPENED flag so repeated calls for an
 * already-opened vnode are no-ops.
 */
static void | |||||
devfs_usecount_inc(struct vnode *vp) | |||||
{ | |||||
struct devfs_dirent *de; | |||||
struct cdev *dev; | |||||
/*
 * NOTE(review): the interlock is asserted here, but per the review
 * discussion below, ->v_data is stabilized by the vnode lock or
 * devfs_de_interlock, _not_ the vnode interlock — to be confirmed.
 */
ASSERT_VI_LOCKED(vp, __func__); | |||||
kib: Why do you need VI_LOCK() in all places covered by devfs_de_interlock ?
May be, declare that… | |||||
Done Inline ActionsI need stable ->v_data which right now is either dev lock or vnode interlock. mjg: I need stable ->v_data which right now is either dev lock or vnode interlock. | |||||
Not Done Inline Actionsv_data is under both vnode lock and de_interlock, and _not_ protected by the vnode interlock. Look at devfs_reclaim. kib: v_data is under both vnode lock and de_interlock, and _not_ protected by the vnode interlock. | |||||
/* A doomed vnode is being reclaimed; do not touch the counters. */
if (VN_IS_DOOMED(vp)) | |||||
return; | |||||
de = vp->v_data; | |||||
dev = vp->v_rdev; | |||||
Not Done Inline ActionsFor dereferencing v_data for devfs vnode, you either need to have the vnode lock, or devfs_de_interlock. Same is true for de->de_vnode. If you own the vnode lock always there, you should assert it, otherwise interlock help is needed. kib: For dereferencing v_data for devfs vnode, you either need to have the vnode lock, or… | |||||
MPASS(de != NULL); | |||||
MPASS(dev != NULL); | |||||
/* Only the 0 -> opened transition contributes to si_usecount. */
if ((de->de_flags2 & DE2_OPENED) == 0) { | |||||
de->de_flags2 |= DE2_OPENED; | |||||
/* si_usecount is protected by the global dev mutex. */
dev_lock(); | |||||
dev->si_usecount++; | |||||
dev_unlock(); | |||||
} | |||||
} | |||||
static void | |||||
devfs_usecount_dec(struct vnode *vp) | |||||
{ | |||||
struct devfs_dirent *de; | |||||
struct cdev *dev; | |||||
ASSERT_VI_LOCKED(vp, __func__); | |||||
de = vp->v_data; | |||||
dev = vp->v_rdev; | |||||
MPASS(de != NULL); | |||||
MPASS(dev != NULL); | |||||
if ((de->de_flags2 & DE2_OPENED) != 0) { | |||||
de->de_flags2 &= ~DE2_OPENED; | |||||
dev_lock(); | |||||
if (dev->si_usecount == 0) | |||||
panic("%s: si_usecount undeflow for dev %p\n", | |||||
__func__, dev); | |||||
dev->si_usecount--; | |||||
dev_unlock(); | |||||
} | |||||
} | |||||
/* | /* | ||||
* On success devfs_populate_vp() returns with dmp->dm_lock held. | * On success devfs_populate_vp() returns with dmp->dm_lock held. | ||||
*/ | */ | ||||
static int | static int | ||||
devfs_populate_vp(struct vnode *vp) | devfs_populate_vp(struct vnode *vp) | ||||
{ | { | ||||
struct devfs_dirent *de; | struct devfs_dirent *de; | ||||
struct devfs_mount *dmp; | struct devfs_mount *dmp; | ||||
int locked; | int locked; | ||||
ASSERT_VOP_LOCKED(vp, "devfs_populate_vp"); | ASSERT_VOP_LOCKED(vp, "devfs_populate_vp"); | ||||
dmp = VFSTODEVFS(vp->v_mount); | dmp = VFSTODEVFS(vp->v_mount); | ||||
locked = VOP_ISLOCKED(vp); | locked = VOP_ISLOCKED(vp); | ||||
sx_xlock(&dmp->dm_lock); | sx_xlock(&dmp->dm_lock); | ||||
DEVFS_DMP_HOLD(dmp); | DEVFS_DMP_HOLD(dmp); | ||||
/* Can't call devfs_populate() with the vnode lock held. */ | /* Can't call devfs_populate() with the vnode lock held. */ | ||||
VOP_UNLOCK(vp); | VOP_UNLOCK(vp); | ||||
devfs_populate(dmp); | devfs_populate(dmp); | ||||
sx_xunlock(&dmp->dm_lock); | sx_xunlock(&dmp->dm_lock); | ||||
vn_lock(vp, locked | LK_RETRY); | vn_lock(vp, locked | LK_RETRY); | ||||
sx_xlock(&dmp->dm_lock); | sx_xlock(&dmp->dm_lock); | ||||
Not Done Inline Actionsstatic ? kib: static ? | |||||
Done Inline Actionswill make it static mjg: will make it static | |||||
if (DEVFS_DMP_DROP(dmp)) { | if (DEVFS_DMP_DROP(dmp)) { | ||||
sx_xunlock(&dmp->dm_lock); | sx_xunlock(&dmp->dm_lock); | ||||
devfs_unmount_final(dmp); | devfs_unmount_final(dmp); | ||||
return (ERESTART); | return (ERESTART); | ||||
} | } | ||||
if (VN_IS_DOOMED(vp)) { | if (VN_IS_DOOMED(vp)) { | ||||
sx_xunlock(&dmp->dm_lock); | sx_xunlock(&dmp->dm_lock); | ||||
return (ERESTART); | return (ERESTART); | ||||
} | } | ||||
de = vp->v_data; | de = vp->v_data; | ||||
KASSERT(de != NULL, | KASSERT(de != NULL, | ||||
("devfs_populate_vp: vp->v_data == NULL but vnode not doomed")); | ("devfs_populate_vp: vp->v_data == NULL but vnode not doomed")); | ||||
if ((de->de_flags & DE_DOOMED) != 0) { | if ((de->de_flags & DE_DOOMED) != 0) { | ||||
sx_xunlock(&dmp->dm_lock); | sx_xunlock(&dmp->dm_lock); | ||||
return (ERESTART); | return (ERESTART); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
devfs_vptocnp(struct vop_vptocnp_args *ap) | devfs_vptocnp(struct vop_vptocnp_args *ap) | ||||
{ | { | ||||
struct vnode *vp = ap->a_vp; | struct vnode *vp = ap->a_vp; | ||||
Not Done Inline ActionsCan this become static ? kib: Can this become static ? | |||||
Done Inline Actionsunref is called from places outside of the file, I think it would be weird to have a mismatch here mjg: unref is called from places outside of the file, I think it would be weird to have a mismatch… | |||||
struct vnode **dvp = ap->a_vpp; | struct vnode **dvp = ap->a_vpp; | ||||
struct devfs_mount *dmp; | struct devfs_mount *dmp; | ||||
char *buf = ap->a_buf; | char *buf = ap->a_buf; | ||||
size_t *buflen = ap->a_buflen; | size_t *buflen = ap->a_buflen; | ||||
struct devfs_dirent *dd, *de; | struct devfs_dirent *dd, *de; | ||||
int i, error; | int i, error; | ||||
dmp = VFSTODEVFS(vp->v_mount); | dmp = VFSTODEVFS(vp->v_mount); | ||||
▲ Show 20 Lines • Show All 193 Lines • ▼ Show 20 Lines | loop: | ||||
if (de->de_dirent->d_type == DT_CHR) { | if (de->de_dirent->d_type == DT_CHR) { | ||||
vp->v_type = VCHR; | vp->v_type = VCHR; | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
dev_lock(); | dev_lock(); | ||||
dev_refl(dev); | dev_refl(dev); | ||||
/* XXX: v_rdev should be protected by vnode lock */ | /* XXX: v_rdev should be protected by vnode lock */ | ||||
vp->v_rdev = dev; | vp->v_rdev = dev; | ||||
VNPASS(vp->v_usecount == 1, vp); | VNPASS(vp->v_usecount == 1, vp); | ||||
dev->si_usecount++; | |||||
/* Special casing of ttys for deadfs. Probably redundant. */ | /* Special casing of ttys for deadfs. Probably redundant. */ | ||||
dsw = dev->si_devsw; | dsw = dev->si_devsw; | ||||
if (dsw != NULL && (dsw->d_flags & D_TTY) != 0) | if (dsw != NULL && (dsw->d_flags & D_TTY) != 0) | ||||
vp->v_vflag |= VV_ISTTY; | vp->v_vflag |= VV_ISTTY; | ||||
dev_unlock(); | dev_unlock(); | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
if ((dev->si_flags & SI_ETERNAL) != 0) | if ((dev->si_flags & SI_ETERNAL) != 0) | ||||
vp->v_vflag |= VV_ETERNALDEV; | vp->v_vflag |= VV_ETERNALDEV; | ||||
▲ Show 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | |||||
devfs_close(struct vop_close_args *ap) | devfs_close(struct vop_close_args *ap) | ||||
{ | { | ||||
struct vnode *vp = ap->a_vp, *oldvp; | struct vnode *vp = ap->a_vp, *oldvp; | ||||
struct thread *td = ap->a_td; | struct thread *td = ap->a_td; | ||||
struct proc *p; | struct proc *p; | ||||
struct cdev *dev = vp->v_rdev; | struct cdev *dev = vp->v_rdev; | ||||
struct cdevsw *dsw; | struct cdevsw *dsw; | ||||
int dflags, error, ref, vp_locked; | int dflags, error, ref, vp_locked; | ||||
int si_usecount; | |||||
/* | /* | ||||
* XXX: Don't call d_close() if we were called because of | * XXX: Don't call d_close() if we were called because of | ||||
* XXX: insmntque1() failure. | * XXX: insmntque1() failure. | ||||
*/ | */ | ||||
if (vp->v_data == NULL) | if (vp->v_data == NULL) | ||||
return (0); | return (0); | ||||
Show All 40 Lines | devfs_close(struct vop_close_args *ap) | ||||
* sum of the reference counts on all the aliased | * sum of the reference counts on all the aliased | ||||
* vnodes descends to one, we are on last close. | * vnodes descends to one, we are on last close. | ||||
*/ | */ | ||||
dsw = dev_refthread(dev, &ref); | dsw = dev_refthread(dev, &ref); | ||||
if (dsw == NULL) | if (dsw == NULL) | ||||
return (ENXIO); | return (ENXIO); | ||||
dflags = 0; | dflags = 0; | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
if (vp->v_usecount == 1 && vcount(vp) == 1) | si_usecount = vcount(vp); | ||||
if (VN_IS_DOOMED(vp) || (vp->v_usecount == 1 && si_usecount == 1)) | |||||
kibUnsubmitted Not Done Inline ActionsWhy did the VN_IS_DOOMED() test appear there? Wouldn't it cause useless (or panicking, not sure)… kib: Why did the VN_IS_DOOMED() test appear there? Wouldn't it cause useless (or panicking, not sure)… | |||||
devfs_usecount_dec(vp); | |||||
if (vp->v_usecount == 1 && si_usecount == 1) | |||||
kibUnsubmitted Not Done Inline ActionsThe check for v_usecount == 1 was used to try to avoid issues with parallel opens and close, sometimes mismatched or lost FLASTCLOSE calls. Your patch makes the race wider, by postponing usecount_inc() after d_open() method returns (d_open()/d_close() often sleep). bde tried to handle the race in a way that would be acceptable for drivers, mostly serial hardware, but it was too complex by requiring inspecting each tty driver. I hesitate to change this aspect of devfs without having some plan how to solve that race. kib: The check for v_usecount == 1 was used to try to avoid issues with parallel opens and close… | |||||
Not Done Inline ActionsIf devfs_usecountl() == 1, doesn't it imply that de_usecount is also 1 ? Otherwise, how did we get close on this de/vp ? Or do you think there is a race ? If not, I think it would be an interesting experiment to commit this with assert and see what happens in wild. kib: If devfs_usecountl() == 1, doesn't it imply that de_usecount is also 1 ? Otherwise, how did we… | |||||
Done Inline ActionsThere can be no race here as both counters are always updated together with devfs_de_interlock held. The separate check is a leftover from previous code which had "updated the global count on 0<->1 transitions of local count" policy. I'll remove it. Said policy is beneficial to implement as it would save on global lock acquire in the common case, but runs into lock ordering issues with vnode interlock. Iow it's just some mess which can be changed later. mjg: There can be no race here as both counters are always updated together with devfs_de_interlock… | |||||
dflags |= FLASTCLOSE; | dflags |= FLASTCLOSE; | ||||
if (VN_IS_DOOMED(vp)) { | if (VN_IS_DOOMED(vp)) { | ||||
/* Forced close. */ | /* Forced close. */ | ||||
dflags |= FREVOKE | FNONBLOCK; | dflags |= FREVOKE | FNONBLOCK; | ||||
} else if (dsw->d_flags & D_TRACKCLOSE) { | } else if (dsw->d_flags & D_TRACKCLOSE) { | ||||
/* Keep device updated on status. */ | /* Keep device updated on status. */ | ||||
} else if ((dflags & FLASTCLOSE) == 0) { | } else if ((dflags & FLASTCLOSE) == 0) { | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
▲ Show 20 Lines • Show All 535 Lines • ▼ Show 20 Lines | devfs_open(struct vop_open_args *ap) | ||||
else | else | ||||
error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td); | error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td); | ||||
/* Clean up any cdevpriv upon error. */ | /* Clean up any cdevpriv upon error. */ | ||||
if (error != 0) | if (error != 0) | ||||
devfs_clear_cdevpriv(); | devfs_clear_cdevpriv(); | ||||
td->td_fpop = fpop; | td->td_fpop = fpop; | ||||
vn_lock(vp, vlocked | LK_RETRY); | vn_lock(vp, vlocked | LK_RETRY); | ||||
if (error == 0) { | |||||
VI_LOCK(vp); | |||||
devfs_usecount_inc(vp); | |||||
VI_UNLOCK(vp); | |||||
} | |||||
dev_relthread(dev, ref); | dev_relthread(dev, ref); | ||||
if (error != 0) { | if (error != 0) { | ||||
if (error == ERESTART) | if (error == ERESTART) | ||||
error = EINTR; | error = EINTR; | ||||
return (error); | return (error); | ||||
} | } | ||||
#if 0 /* /dev/console */ | #if 0 /* /dev/console */ | ||||
▲ Show 20 Lines • Show All 219 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
devfs_reclaim(struct vop_reclaim_args *ap) | devfs_reclaim(struct vop_reclaim_args *ap) | ||||
{ | { | ||||
struct vnode *vp; | struct vnode *vp; | ||||
struct devfs_dirent *de; | struct devfs_dirent *de; | ||||
vp = ap->a_vp; | vp = ap->a_vp; | ||||
mtx_lock(&devfs_de_interlock); | mtx_lock(&devfs_de_interlock); | ||||
Not Done Inline ActionsWhy is this call needed ? Wouldn't you mechanism only needed for VCHR vnodes ? BTW, I think that you should assert the vnode type in devfs_usecount_*(). kib: Why is this call needed ? Wouldn't you mechanism only needed for VCHR vnodes ?
BTW, I think… | |||||
de = vp->v_data; | de = vp->v_data; | ||||
if (de != NULL) { | if (de != NULL) { | ||||
de->de_vnode = NULL; | de->de_vnode = NULL; | ||||
vp->v_data = NULL; | vp->v_data = NULL; | ||||
} | } | ||||
mtx_unlock(&devfs_de_interlock); | mtx_unlock(&devfs_de_interlock); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
devfs_reclaim_vchr(struct vop_reclaim_args *ap) | devfs_reclaim_vchr(struct vop_reclaim_args *ap) | ||||
{ | { | ||||
struct vnode *vp; | struct vnode *vp; | ||||
struct cdev *dev; | struct cdev *dev; | ||||
vp = ap->a_vp; | vp = ap->a_vp; | ||||
MPASS(vp->v_type == VCHR); | MPASS(vp->v_type == VCHR); | ||||
devfs_reclaim(ap); | devfs_reclaim(ap); | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
dev_lock(); | dev_lock(); | ||||
dev = vp->v_rdev; | dev = vp->v_rdev; | ||||
vp->v_rdev = NULL; | vp->v_rdev = NULL; | ||||
if (dev != NULL) | |||||
dev->si_usecount -= (vp->v_usecount > 0); | |||||
dev_unlock(); | dev_unlock(); | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
if (dev != NULL) | if (dev != NULL) | ||||
dev_rel(dev); | dev_rel(dev); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
▲ Show 20 Lines • Show All 539 Lines • Show Last 20 Lines |
Why do you need VI_LOCK() in all places covered by devfs_de_interlock ?
Maybe declare that the device usecount is protected by the de_interlock mtx?