Changeset View
Changeset View
Standalone View
Standalone View
sys/fs/nullfs/null_vnops.c
Show First 20 Lines • Show All 333 Lines • ▼ Show 20 Lines | |||||
/*
 * Forward a writecount change on a nullfs vnode to the underlying
 * (lower) vnode, mirroring the change in the null vnode's own
 * v_writecount only when the lower layer accepts it.
 *
 * Returns 0 on success, or the error from the lower vnode's
 * VOP_ADD_WRITECOUNT, in which case the local count is unchanged.
 */
static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	/*
	 * The vnode interlock serializes the check-and-update of
	 * v_writecount across the call into the lower layer.
	 */
	VI_LOCK(vp);
	/* text refs are bypassed to lowervp */
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	/*
	 * Lease the whole increment to the lower vnode rather than
	 * collapsing it to +/-1; null_reclaim() later undoes the full
	 * leased amount on forced unmount.
	 */
	error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (error);
}
/* | /* | ||||
* We have to carry on the locking protocol on the null layer vnodes | * We have to carry on the locking protocol on the null layer vnodes | ||||
* as we progress through the tree. We also have to enforce read-only | * as we progress through the tree. We also have to enforce read-only | ||||
* if this layer is mounted read-only. | * if this layer is mounted read-only. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 438 Lines • ▼ Show 20 Lines | null_reclaim(struct vop_reclaim_args *ap) | ||||
* Use the interlock to protect the clearing of v_data to | * Use the interlock to protect the clearing of v_data to | ||||
* prevent faults in null_lock(). | * prevent faults in null_lock(). | ||||
*/ | */ | ||||
lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL); | lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL); | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
vp->v_data = NULL; | vp->v_data = NULL; | ||||
vp->v_object = NULL; | vp->v_object = NULL; | ||||
vp->v_vnlock = &vp->v_lock; | vp->v_vnlock = &vp->v_lock; | ||||
VI_UNLOCK(vp); | |||||
/* | /* | ||||
* If we were opened for write, we leased one write reference | * If we were opened for write, we leased the write reference | ||||
* to the lower vnode. If this is a reclamation due to the | * to the lower vnode. If this is a reclamation due to the | ||||
* forced unmount, undo the reference now. | * forced unmount, undo the reference now. | ||||
*/ | */ | ||||
if (vp->v_writecount > 0) | if (vp->v_writecount > 0) | ||||
VOP_ADD_WRITECOUNT(lowervp, -1); | VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount); | ||||
VI_UNLOCK(vp); | |||||
if ((xp->null_flags & NULLV_NOUNLOCK) != 0) | if ((xp->null_flags & NULLV_NOUNLOCK) != 0) | ||||
vunref(lowervp); | vunref(lowervp); | ||||
else | else | ||||
vput(lowervp); | vput(lowervp); | ||||
free(xp, M_NULLFSNODE); | free(xp, M_NULLFSNODE); | ||||
return (0); | return (0); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 121 Lines • Show Last 20 Lines |