diff --git a/sys/kern/kern_rangelock.c b/sys/kern/kern_rangelock.c
--- a/sys/kern/kern_rangelock.c
+++ b/sys/kern/kern_rangelock.c
@@ -462,6 +462,10 @@
 	smr_enter(rl_smr);
 }
 
+/*
+ * Try to insert an entry into the queue.  Return true if successful, otherwise
+ * false.
+ */
 static bool
 rl_q_cas(struct rl_q_entry **prev, struct rl_q_entry *old,
     struct rl_q_entry *new)
@@ -517,15 +521,51 @@
 enum RL_INSERT_RES {
 	RL_TRYLOCK_FAILED,
+	RL_TRYLOCK_FAILED_MARKED,
 	RL_LOCK_SUCCESS,
 	RL_LOCK_RETRY,
 };
 
+/*
+ * Handle a possible lock conflict between cur and e.
+ */
+static enum RL_INSERT_RES
+rl_conflict(struct rangelock *lock, struct rl_q_entry *cur, struct rl_q_entry *e,
+    bool trylock)
+{
+	sleepq_lock(&lock->sleepers);
+	if (rl_e_is_marked(rl_q_load(&cur->rl_q_next))) {
+		sleepq_release(&lock->sleepers);
+		return (RL_LOCK_SUCCESS);	/* no conflict after all */
+	}
+	KASSERT(cur->rl_q_owner != curthread,
+	    ("%s: conflicting range is locked by the current thread",
+	    __func__));
+	rangelock_unlock_int(lock, e);
+	if (trylock) {
+		sleepq_release(&lock->sleepers);
+
+		/*
+		 * The lock acquisition failed, but the marked queue entry is
+		 * already visible to other threads and thus is not safe to
+		 * free by the current thread.
+		 */
+		return (RL_TRYLOCK_FAILED_MARKED);
+	}
+	rl_insert_sleep(lock);
+	return (RL_LOCK_RETRY);
+}
+
+/*
+ * Having inserted entry e, verify that no conflicting write locks are present;
+ * clean up dead entries that we encounter along the way.
+ */
 static enum RL_INSERT_RES
 rl_r_validate(struct rangelock *lock, struct rl_q_entry *e, bool trylock,
     struct rl_q_entry **free)
 {
 	struct rl_q_entry *cur, *next, **prev;
+	enum RL_INSERT_RES res;
 
 again:
 	prev = &e->rl_q_next;
@@ -550,20 +590,10 @@
 			cur = rl_e_unmark_unchecked(rl_q_load(prev));
 			continue;
 		}
-		if (!rl_e_is_marked(rl_q_load(&cur->rl_q_next))) {
-			sleepq_lock(&lock->sleepers);
-			if (rl_e_is_marked(rl_q_load(&cur->rl_q_next))) {
-				sleepq_release(&lock->sleepers);
-				continue;
-			}
-			rangelock_unlock_int(lock, e);
-			if (trylock) {
-				sleepq_release(&lock->sleepers);
-				return (RL_TRYLOCK_FAILED);
-			}
-			rl_insert_sleep(lock);
-			return (RL_LOCK_RETRY);
-		}
+
+		res = rl_conflict(lock, cur, e, trylock);
+		if (res != RL_LOCK_SUCCESS)
+			return (res);
 	}
 }
 
@@ -572,6 +602,7 @@
     bool trylock, struct rl_q_entry **free)
 {
 	struct rl_q_entry *cur, *next, **prev;
+	enum RL_INSERT_RES res;
 
 again:
 	prev = (struct rl_q_entry **)&lock->head;
@@ -596,20 +627,10 @@
 			cur = rl_e_unmark_unchecked(rl_q_load(prev));
 			continue;
 		}
-		sleepq_lock(&lock->sleepers);
-		/* Reload after sleepq is locked */
-		next = rl_q_load(&cur->rl_q_next);
-		if (rl_e_is_marked(next)) {
-			sleepq_release(&lock->sleepers);
-			goto again;
-		}
-		rangelock_unlock_int(lock, e);
-		if (trylock) {
-			sleepq_release(&lock->sleepers);
-			return (RL_TRYLOCK_FAILED);
-		}
-		rl_insert_sleep(lock);
-		return (RL_LOCK_RETRY);
+
+		res = rl_conflict(lock, cur, e, trylock);
+		if (res != RL_LOCK_SUCCESS)
+			return (res);
 	}
 }
 
@@ -659,6 +680,9 @@
 			sleepq_release(&lock->sleepers);
 			continue;
 		}
+		KASSERT(cur->rl_q_owner != curthread,
+		    ("%s: conflicting range is locked by the current thread",
+		    __func__));
 		if (trylock) {
 			sleepq_release(&lock->sleepers);
 			return (RL_TRYLOCK_FAILED);
 		}
@@ -697,10 +721,12 @@
 	smr_enter(rl_smr);
 	res = rl_insert(lock, e, trylock, &free);
 	smr_exit(rl_smr);
-	if (res == RL_TRYLOCK_FAILED) {
+	if (res == RL_TRYLOCK_FAILED || res == RL_TRYLOCK_FAILED_MARKED) {
 		MPASS(trylock);
-		e->rl_q_free = free;
-		free = e;
+		if (res == RL_TRYLOCK_FAILED) {
+			e->rl_q_free = free;
+			free = e;
+		}
 		e = NULL;
 	}
 	rangelock_free_free(free);
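
A note on the result codes above, with an illustrative sketch. rl_conflict()
follows the classic lost-wakeup-avoidance pattern: check the conflict
locklessly, take the sleep-queue lock that serializes against unlockers,
re-check, and only then fail or go to sleep. The kernel version additionally
drops its own already-inserted entry via rangelock_unlock_int() before failing
or sleeping, which is what makes the distinct RL_TRYLOCK_FAILED_MARKED result
necessary: the marked entry is already visible to other threads, so it cannot
be freed immediately the way an unpublished entry can. The self-contained
userspace C sketch below re-creates only the re-check pattern with POSIX
primitives; every name in it is a hypothetical stand-in, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t sleepers_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sleepers_cv = PTHREAD_COND_INITIALIZER;

enum res { LOCK_SUCCESS, LOCK_RETRY, TRYLOCK_FAILED };

/*
 * Analogue of rl_conflict(): serialize against the wakeup path, re-check
 * whether the conflicting entry died in the meantime, then decide.
 */
static enum res
conflict(_Atomic bool *cur_marked, bool trylock)
{
	enum res r;

	pthread_mutex_lock(&sleepers_mtx);	/* like sleepq_lock() */
	if (atomic_load(cur_marked)) {
		/* The conflicting entry was unlocked first; no conflict. */
		r = LOCK_SUCCESS;
	} else if (trylock) {
		r = TRYLOCK_FAILED;
	} else {
		/* Unlockers signal the condvar while holding sleepers_mtx. */
		pthread_cond_wait(&sleepers_cv, &sleepers_mtx);
		r = LOCK_RETRY;			/* like rl_insert_sleep() */
	}
	pthread_mutex_unlock(&sleepers_mtx);
	return (r);
}

int
main(void)
{
	_Atomic bool marked = false;

	/* The conflicting entry is still live, so a trylock must fail. */
	return (conflict(&marked, true) == TRYLOCK_FAILED ? 0 : 1);
}

Because the re-check happens under the same lock the unlock path takes before
issuing wakeups, a conflicting entry cannot be unlocked between the re-check
and the sleep, so no wakeup can be lost.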