Index: fs/nfs/nfs_commonkrpc.c =================================================================== --- fs/nfs/nfs_commonkrpc.c +++ fs/nfs/nfs_commonkrpc.c @@ -1309,21 +1309,21 @@ } /* - * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the - * old one after msleep() returns. + * NFS wrapper to sx_sleep(), that shoves a new p_sigmask and restores the + * old one after sx_sleep() returns. */ int -newnfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo) +newnfs_sxsleep(struct thread *td, void *ident, struct sx *sx, int priority, char *wmesg, int timo) { sigset_t oldset; int error; if ((priority & PCATCH) == 0) - return msleep(ident, mtx, priority, wmesg, timo); + return sx_sleep(ident, sx, priority, wmesg, timo); if (td == NULL) td = curthread; /* XXX */ newnfs_set_sigmask(td, &oldset); - error = msleep(ident, mtx, priority, wmesg, timo); + error = sx_sleep(ident, sx, priority, wmesg, timo); newnfs_restore_sigmask(td, &oldset); return (error); } Index: fs/nfs/nfs_var.h =================================================================== --- fs/nfs/nfs_var.h +++ fs/nfs/nfs_var.h @@ -709,7 +709,7 @@ int newnfs_nmcancelreqs(struct nfsmount *); void newnfs_set_sigmask(struct thread *, sigset_t *); void newnfs_restore_sigmask(struct thread *, sigset_t *); -int newnfs_msleep(struct thread *, void *, struct mtx *, int, char *, int); +int newnfs_sxsleep(struct thread *, void *, struct sx *, int, char *, int); int newnfs_request(struct nfsrv_descript *, struct nfsmount *, struct nfsclient *, struct nfssockreq *, vnode_t, NFSPROC_T *, struct ucred *, u_int32_t, u_int32_t, u_char *, int, u_int64_t *, Index: fs/nfs/nfsport.h =================================================================== --- fs/nfs/nfsport.h +++ fs/nfs/nfsport.h @@ -686,12 +686,12 @@ #define NFSV4ROOTLOCKMUTEXPTR (&nfs_v4root_mutex) #define NFSLOCKV4ROOTMUTEX() mtx_lock(&nfs_v4root_mutex) #define NFSUNLOCKV4ROOTMUTEX() mtx_unlock(&nfs_v4root_mutex) 
-#define NFSLOCKNODE(n) mtx_lock(&((n)->n_mtx)) -#define NFSUNLOCKNODE(n) mtx_unlock(&((n)->n_mtx)) +#define NFSLOCKNODE(n) sx_xlock(&((n)->n_sx)) +#define NFSUNLOCKNODE(n) sx_xunlock(&((n)->n_sx)) #define NFSLOCKMNT(m) mtx_lock(&((m)->nm_mtx)) #define NFSUNLOCKMNT(m) mtx_unlock(&((m)->nm_mtx)) -#define NFSLOCKIOD() mtx_lock(&ncl_iod_mutex) -#define NFSUNLOCKIOD() mtx_unlock(&ncl_iod_mutex) +#define NFSLOCKIOD() sx_xlock(&ncl_iod_sx) +#define NFSUNLOCKIOD() sx_xunlock(&ncl_iod_sx) #define NFSLOCKREQUEST(r) mtx_lock(&((r)->r_mtx)) #define NFSUNLOCKREQUEST(r) mtx_unlock(&((r)->r_mtx)) #define NFSLOCKSOCKREQ(r) mtx_lock(&((r)->nr_mtx)) Index: fs/nfsclient/nfs_clbio.c =================================================================== --- fs/nfsclient/nfs_clbio.c +++ fs/nfsclient/nfs_clbio.c @@ -63,7 +63,7 @@ extern int newnfs_directio_allow_mmap; extern struct nfsstatsv1 nfsstatsv1; -extern struct mtx ncl_iod_mutex; +extern struct sx ncl_iod_sx; extern int ncl_numasync; extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON]; extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON]; @@ -1475,8 +1475,8 @@ NFS_DPF(ASYNCIO, ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp)); nmp->nm_bufqwant = TRUE; - error = newnfs_msleep(td, &nmp->nm_bufq, - &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio", + error = newnfs_sxsleep(td, &nmp->nm_bufq, + &ncl_iod_sx, slpflag | PRIBIO, "nfsaio", slptimeo); if (error) { error2 = newnfs_sigintr(nmp, td); Index: fs/nfsclient/nfs_clnfsiod.c =================================================================== --- fs/nfsclient/nfs_clnfsiod.c +++ fs/nfsclient/nfs_clnfsiod.c @@ -70,7 +70,7 @@ #include #include -extern struct mtx ncl_iod_mutex; +extern struct sx ncl_iod_sx; extern struct task ncl_nfsiodnew_task; int ncl_numasync; @@ -169,7 +169,7 @@ { int error, i; - mtx_assert(&ncl_iod_mutex, MA_OWNED); + sx_assert(&ncl_iod_sx, SA_XLOCKED); for (i = 0; i < ncl_iodmax; i++) { if (nfs_asyncdaemon[i] == 0) { nfs_asyncdaemon[i] = 1; @@ -206,7 
+206,7 @@ ncl_nfsiodnew(void) { - mtx_assert(&ncl_iod_mutex, MA_OWNED); + sx_assert(&ncl_iod_sx, SA_XLOCKED); taskqueue_enqueue(taskqueue_thread, &ncl_nfsiodnew_task); } @@ -267,7 +267,7 @@ * Always keep at least nfs_iodmin kthreads. */ timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz; - error = msleep(&ncl_iodwant[myiod], &ncl_iod_mutex, PWAIT | PCATCH, + error = sx_sleep(&ncl_iodwant[myiod], &ncl_iod_sx, PWAIT | PCATCH, "-", timo); if (error) { nmp = ncl_iodmount[myiod]; Index: fs/nfsclient/nfs_clnode.c =================================================================== --- fs/nfsclient/nfs_clnode.c +++ fs/nfsclient/nfs_clnode.c @@ -142,7 +142,7 @@ * destroy the mutex (in the case of the loser, or if hash_insert * happened to return an error no special casing is needed). */ - mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK); + sx_init_flags(&np->n_sx, "NEWNFSnode lock", SX_DUPOK); lockinit(&np->n_excl, PVFS, "nfsupg", VLKTIMEOUT, LK_NOSHARE | LK_CANRECURSE); @@ -171,7 +171,7 @@ if (error != 0) { *npp = NULL; free(np->n_fhp, M_NFSFH); - mtx_destroy(&np->n_mtx); + sx_destroy(&np->n_sx); lockdestroy(&np->n_excl); uma_zfree(newnfsnode_zone, np); return (error); @@ -212,7 +212,7 @@ ASSERT_VOP_ELOCKED(vp, "releasesillyrename"); np = VTONFS(vp); - mtx_assert(&np->n_mtx, MA_OWNED); + sx_assert(&np->n_sx, SA_XLOCKED); if (vp->v_type != VDIR) { sp = np->n_sillyrename; np->n_sillyrename = NULL; @@ -332,7 +332,7 @@ free(np->n_fhp, M_NFSFH); if (np->n_v4 != NULL) free(np->n_v4, M_NFSV4NODE); - mtx_destroy(&np->n_mtx); + sx_destroy(&np->n_sx); lockdestroy(&np->n_excl); uma_zfree(newnfsnode_zone, vp->v_data); vp->v_data = NULL; Index: fs/nfsclient/nfs_clport.c =================================================================== --- fs/nfsclient/nfs_clport.c +++ fs/nfsclient/nfs_clport.c @@ -84,7 +84,7 @@ extern int nfscl_enablecallb; extern int nfs_numnfscbd; extern int nfscl_inited; -struct mtx ncl_iod_mutex; +struct sx ncl_iod_sx; NFSDLOCKMUTEX; extern 
struct mtx nfsrv_dslock_mtx; @@ -227,12 +227,12 @@ vp->v_data = np; np->n_vnode = vp; /* - * Initialize the mutex even if the vnode is going to be a loser. + * Initialize the lock even if the vnode is going to be a loser. * This simplifies the logic in reclaim, which can then unconditionally - * destroy the mutex (in the case of the loser, or if hash_insert + * destroy the lock (in the case of the loser, or if hash_insert * happened to return an error no special casing is needed). */ - mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK); + sx_init_flags(&np->n_sx, "NEWNFSnode lock", SX_DUPOK); lockinit(&np->n_excl, PVFS, "nfsupg", VLKTIMEOUT, LK_NOSHARE | LK_CANRECURSE); @@ -275,7 +275,7 @@ error = insmntque(vp, mntp); if (error != 0) { *npp = NULL; - mtx_destroy(&np->n_mtx); + sx_destroy(&np->n_sx); lockdestroy(&np->n_excl); free(nfhp, M_NFSFH); if (np->n_v4 != NULL) @@ -415,9 +415,6 @@ struct nfsmount *nmp; struct timespec mtime_save; - vm_object_t object; - u_quad_t nsize; int error, force_fid_err; - bool setnsize; error = 0; @@ -565,26 +562,9 @@ if (np->n_attrstamp != 0) KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, vap, error); #endif - nsize = vap->va_size; - object = vp->v_object; - setnsize = false; - if (object != NULL) { - if (OFF_TO_IDX(nsize + PAGE_MASK) < object->size) { - /* - * When shrinking the size, the call to - * vnode_pager_setsize() cannot be done with - * the mutex held, because we might need to - * wait for a busy page. Delay it until after - * the node is unlocked.
- */ - setnsize = true; - } else { - vnode_pager_setsize(vp, nsize); - } - } + if (vp->v_object != NULL) + vnode_pager_setsize(vp, vap->va_size); NFSUNLOCKNODE(np); - if (setnsize) - vnode_pager_setsize(vp, nsize); return (error); } @@ -1338,7 +1319,7 @@ if (loaded) return (0); newnfs_portinit(); - mtx_init(&ncl_iod_mutex, "ncl_iod_mutex", NULL, MTX_DEF); + sx_init(&ncl_iod_sx, "ncl_iod_sx"); nfscl_init(); NFSD_LOCK(); nfsrvd_cbinit(0); @@ -1362,7 +1343,7 @@ nfsd_call_nfscl = NULL; uma_zdestroy(ncl_pbuf_zone); /* and get rid of the mutexes */ - mtx_destroy(&ncl_iod_mutex); + sx_destroy(&ncl_iod_sx); loaded = 0; break; #else Index: fs/nfsclient/nfs_clsubs.c =================================================================== --- fs/nfsclient/nfs_clsubs.c +++ fs/nfsclient/nfs_clsubs.c @@ -80,7 +80,7 @@ */ #include -extern struct mtx ncl_iod_mutex; +extern struct sx ncl_iod_sx; extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON]; extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON]; extern int ncl_numasync; @@ -109,7 +109,7 @@ wakeup(&ncl_iodwant[i]); /* The last nfsiod to exit will wake us up when ncl_numasync hits 0 */ while (ncl_numasync) - msleep(&ncl_numasync, &ncl_iod_mutex, PWAIT, "ioddie", 0); + sx_sleep(&ncl_numasync, &ncl_iod_sx, PWAIT, "ioddie", 0); NFSUNLOCKIOD(); ncl_nhuninit(); return (0); @@ -123,7 +123,7 @@ { NFSLOCKNODE(np); while (np->n_flag & NDIRCOOKIELK) - (void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0); + sx_sleep(&np->n_flag, &np->n_sx, PZERO, "nfsdirlk", 0); np->n_flag |= NDIRCOOKIELK; NFSUNLOCKNODE(np); } Index: fs/nfsclient/nfs_clvfsops.c =================================================================== --- fs/nfsclient/nfs_clvfsops.c +++ fs/nfsclient/nfs_clvfsops.c @@ -84,7 +84,7 @@ extern int nfscl_debuglevel; extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON]; extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON]; -extern struct mtx ncl_iod_mutex; +extern struct sx ncl_iod_sx; NFSCLSTATEMUTEX; extern struct 
mtx nfsrv_dslock_mtx; Index: fs/nfsclient/nfs_clvnops.c =================================================================== --- fs/nfsclient/nfs_clvnops.c +++ fs/nfsclient/nfs_clvnops.c @@ -283,12 +283,12 @@ * SMP Locking Note : * The list of locks after the description of the lock is the ordering * of other locks acquired with the lock held. - * np->n_mtx : Protects the fields in the nfsnode. + * np->n_sx : Protects the fields in the nfsnode. VM Object Lock VI_MTX (acquired indirectly) * nmp->nm_mtx : Protects the fields in the nfsmount. rep->r_mtx - * ncl_iod_mutex : Global lock, protects shared nfsiod state. + * ncl_iod_sx : Global lock, protects shared nfsiod state. * nfs_reqq_mtx : Global lock, protects the nfs_reqq list. nmp->nm_mtx rep->r_mtx @@ -1095,7 +1095,7 @@ NFSLOCKNODE(np); while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) { np->n_flag |= NREMOVEWANT; - (void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0); + sx_sleep(np, &np->n_sx, PZERO, "nfslkup", 0); } NFSUNLOCKNODE(np); @@ -1596,7 +1596,7 @@ NFSLOCKNODE(dnp); while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) { dnp->n_flag |= NREMOVEWANT; - (void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0); + sx_sleep(dnp, &dnp->n_sx, PZERO, "nfscrt", 0); } NFSUNLOCKNODE(dnp); @@ -2985,8 +2985,8 @@ NFSLOCKNODE(np); while (np->n_directio_asyncwr > 0) { np->n_flag |= NFSYNCWAIT; - error = newnfs_msleep(td, &np->n_directio_asyncwr, - &np->n_mtx, slpflag | (PRIBIO + 1), + error = newnfs_sxsleep(td, &np->n_directio_asyncwr, + &np->n_sx, slpflag | (PRIBIO + 1), "nfsfsync", 0); if (error) { if (newnfs_sigintr(nmp, td)) { Index: fs/nfsclient/nfsnode.h =================================================================== --- fs/nfsclient/nfsnode.h +++ fs/nfsclient/nfsnode.h @@ -93,7 +93,7 @@ * be well aligned and, therefore, tightly packed. 
*/ struct nfsnode { - struct mtx n_mtx; /* Protects all of these members */ + struct sx n_sx; /* Protects all of these members */ struct lock n_excl; /* Exclusive helper for shared vnode lock */ u_quad_t n_size; /* Current size of file */