Index: kern/kern_rangelock.c
===================================================================
--- kern/kern_rangelock.c
+++ kern/kern_rangelock.c
@@ -57,10 +57,15 @@
 SYSINIT(vfs, SI_SUB_LOCK, SI_ORDER_ANY, rangelock_sys_init, NULL);
 
 static struct rl_q_entry *
-rlqentry_alloc(void)
+rlqentry_alloc(int cansleep)
 {
+	int zalloc_flag;
 
-	return (uma_zalloc(rl_entry_zone, M_WAITOK));
+	if (cansleep == 1)
+		zalloc_flag = M_WAITOK;
+	else
+		zalloc_flag = M_NOWAIT;
+	return (uma_zalloc(rl_entry_zone, zalloc_flag));
 }
 
 void
@@ -196,11 +201,11 @@
 
 /*
  * Add the lock request to the queue of the pending requests for
- * rangelock.  Sleep until the request can be granted.
+ * rangelock.  Sleep until the request can be granted, unless cansleep == 0.
  */
 static void *
 rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode,
-    struct mtx *ilk)
+    struct mtx *ilk, int cansleep)
 {
 	struct rl_q_entry *entry;
 	struct thread *td;
@@ -211,8 +216,11 @@
 	if (td->td_rlqe != NULL) {
 		entry = td->td_rlqe;
 		td->td_rlqe = NULL;
-	} else
-		entry = rlqentry_alloc();
+	} else {
+		entry = rlqentry_alloc(cansleep);
+		if (cansleep == 0 && entry == NULL)
+			return (NULL);
+	}
 	MPASS(entry != NULL);
 	entry->rl_q_flags = mode;
 	entry->rl_q_start = start;
@@ -226,11 +234,30 @@
 	 */
 	TAILQ_INSERT_TAIL(&lock->rl_waiters, entry, rl_q_link);
+	/*
+	 * If rl_currdep == NULL, there is no entry waiting for a conflicting
+	 * range to be resolved, so set rl_currdep to this entry.  If there is
+	 * no conflicting entry for this entry, rl_currdep will be set back to
+	 * NULL by rangelock_calc_block().
+	 */
 	if (lock->rl_currdep == NULL)
 		lock->rl_currdep = entry;
 	rangelock_calc_block(lock);
-	while (!(entry->rl_q_flags & RL_LOCK_GRANTED))
+	while (!(entry->rl_q_flags & RL_LOCK_GRANTED)) {
+		if (cansleep == 0) {
+			/*
+			 * If rl_currdep is this entry, rl_currdep needs to
+			 * be set to the next entry in the rl_waiters list.
+			 * However, this entry was just inserted at the tail
+			 * of the list, so the next entry is NULL.
+			 */
+			if (lock->rl_currdep == entry)
+				lock->rl_currdep = NULL;
+			rangelock_unlock_locked(lock, entry, ilk);
+			return (NULL);
+		}
 		msleep(entry, ilk, 0, "range", 0);
+	}
 	mtx_unlock(ilk);
 	return (entry);
 }
 
@@ -239,12 +266,28 @@
 rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
 {
 
-	return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk));
+	return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk, 1));
 }
 
 void *
+rangelock_rlock_trylock(struct rangelock *lock, off_t start, off_t end,
+    struct mtx *ilk)
+{
+
+	return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk, 0));
+}
+
+void *
 rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
 {
 
-	return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk));
+	return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk, 1));
+}
+
+void *
+rangelock_wlock_trylock(struct rangelock *lock, off_t start, off_t end,
+    struct mtx *ilk)
+{
+
+	return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk, 0));
 }

Index: sys/rangelock.h
===================================================================
--- sys/rangelock.h
+++ sys/rangelock.h
@@ -75,8 +75,12 @@
 	    off_t start, off_t end, struct mtx *ilk);
 void	*rangelock_rlock(struct rangelock *lock, off_t start, off_t end,
 	    struct mtx *ilk);
+void	*rangelock_rlock_trylock(struct rangelock *lock, off_t start,
+	    off_t end, struct mtx *ilk);
 void	*rangelock_wlock(struct rangelock *lock, off_t start, off_t end,
 	    struct mtx *ilk);
+void	*rangelock_wlock_trylock(struct rangelock *lock, off_t start,
+	    off_t end, struct mtx *ilk);
 void	 rlqentry_free(struct rl_q_entry *rlqe);
 
 #endif	/* _KERNEL */

Index: sys/vnode.h
===================================================================
--- sys/vnode.h
+++ sys/vnode.h
@@ -723,8 +723,12 @@
 	    VI_MTX(vp))
 #define	vn_rangelock_rlock(vp, start, end)				\
 	rangelock_rlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
+#define	vn_rangelock_rlock_trylock(vp, start, end)			\
+	rangelock_rlock_trylock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
 #define	vn_rangelock_wlock(vp, start, end)				\
 	rangelock_wlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
+#define	vn_rangelock_wlock_trylock(vp, start, end)			\
+	rangelock_wlock_trylock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
 
 int	vfs_cache_lookup(struct vop_lookup_args *ap);
 void	vfs_timestamp(struct timespec *);
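
For illustration, a minimal sketch of how a caller might use the new non-sleeping
variants through the vn_rangelock_*_trylock() macros.  The vnode vp, the off/len
variables, and the fall-back-to-blocking policy are hypothetical and not part of
the patch:

	void *cookie;

	/*
	 * Opportunistically try to read-lock the byte range starting at off
	 * without sleeping.  The trylock variant returns NULL instead of
	 * blocking when the range conflicts with an existing lock.
	 */
	cookie = vn_rangelock_rlock_trylock(vp, off, off + len);
	if (cookie == NULL) {
		/* Contended; this caller chooses to fall back to sleeping. */
		cookie = vn_rangelock_rlock(vp, off, off + len);
	}

	/* ... perform the ranged I/O on vp ... */

	vn_rangelock_unlock(vp, cookie);

Note that on the failure path the patch hands the entry to
rangelock_unlock_locked(), which in the existing code removes it from the
rl_waiters queue and drops the interlock, so a NULL result should leave the
range lock in the same state as before the call.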