Changeset View
Standalone View
kern/kern_rangelock.c
Show First 20 Lines • Show All 135 Lines • ▼ Show 20 Lines | for (entry = lock->rl_currdep; entry != NULL; entry = nextentry) { | ||||
wakeup(entry); | wakeup(entry); | ||||
} | } | ||||
out: | out: | ||||
lock->rl_currdep = entry; | lock->rl_currdep = entry; | ||||
} | } | ||||
static void | static void | ||||
rangelock_unlock_locked(struct rangelock *lock, struct rl_q_entry *entry, | rangelock_unlock_locked(struct rangelock *lock, struct rl_q_entry *entry, | ||||
struct mtx *ilk) | struct mtx *ilk, int do_calc_block) | ||||
kib: Please use bool and true/false. | |||||
{ | { | ||||
MPASS(lock != NULL && entry != NULL && ilk != NULL); | MPASS(lock != NULL && entry != NULL && ilk != NULL); | ||||
mtx_assert(ilk, MA_OWNED); | mtx_assert(ilk, MA_OWNED); | ||||
KASSERT(entry != lock->rl_currdep, ("stuck currdep")); | KASSERT(entry != lock->rl_currdep, ("stuck currdep")); | ||||
TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link); | TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link); | ||||
if (do_calc_block != 0) | |||||
rangelock_calc_block(lock); | rangelock_calc_block(lock); | ||||
mtx_unlock(ilk); | mtx_unlock(ilk); | ||||
if (curthread->td_rlqe == NULL) | if (curthread->td_rlqe == NULL) | ||||
curthread->td_rlqe = entry; | curthread->td_rlqe = entry; | ||||
else | else | ||||
rlqentry_free(entry); | rlqentry_free(entry); | ||||
} | } | ||||
void | void | ||||
rangelock_unlock(struct rangelock *lock, void *cookie, struct mtx *ilk) | rangelock_unlock(struct rangelock *lock, void *cookie, struct mtx *ilk) | ||||
{ | { | ||||
MPASS(lock != NULL && cookie != NULL && ilk != NULL); | MPASS(lock != NULL && cookie != NULL && ilk != NULL); | ||||
mtx_lock(ilk); | mtx_lock(ilk); | ||||
rangelock_unlock_locked(lock, cookie, ilk); | rangelock_unlock_locked(lock, cookie, ilk, 1); | ||||
} | } | ||||
/* | /* | ||||
* Unlock the sub-range of granted lock. | * Unlock the sub-range of granted lock. | ||||
*/ | */ | ||||
void * | void * | ||||
rangelock_unlock_range(struct rangelock *lock, void *cookie, off_t start, | rangelock_unlock_range(struct rangelock *lock, void *cookie, off_t start, | ||||
off_t end, struct mtx *ilk) | off_t end, struct mtx *ilk) | ||||
{ | { | ||||
struct rl_q_entry *entry; | struct rl_q_entry *entry; | ||||
MPASS(lock != NULL && cookie != NULL && ilk != NULL); | MPASS(lock != NULL && cookie != NULL && ilk != NULL); | ||||
entry = cookie; | entry = cookie; | ||||
KASSERT(entry->rl_q_flags & RL_LOCK_GRANTED, | KASSERT(entry->rl_q_flags & RL_LOCK_GRANTED, | ||||
("Unlocking non-granted lock")); | ("Unlocking non-granted lock")); | ||||
KASSERT(entry->rl_q_start == start, ("wrong start")); | KASSERT(entry->rl_q_start == start, ("wrong start")); | ||||
KASSERT(entry->rl_q_end >= end, ("wrong end")); | KASSERT(entry->rl_q_end >= end, ("wrong end")); | ||||
mtx_lock(ilk); | mtx_lock(ilk); | ||||
if (entry->rl_q_end == end) { | if (entry->rl_q_end == end) { | ||||
rangelock_unlock_locked(lock, cookie, ilk); | rangelock_unlock_locked(lock, cookie, ilk, 1); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
entry->rl_q_end = end; | entry->rl_q_end = end; | ||||
rangelock_calc_block(lock); | rangelock_calc_block(lock); | ||||
mtx_unlock(ilk); | mtx_unlock(ilk); | ||||
return (cookie); | return (cookie); | ||||
} | } | ||||
/* | /* | ||||
* Add the lock request to the queue of the pending requests for | * Add the lock request to the queue of the pending requests for | ||||
* rangelock. Sleep until the request can be granted. | * rangelock. Sleep until the request can be granted unless trylock != 0. | ||||
*/ | */ | ||||
static void * | static void * | ||||
rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode, | rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode, | ||||
struct mtx *ilk) | struct mtx *ilk, int trylock) | ||||
kibUnsubmitted Done Inline ActionsSame, please use bool. kib: Same, please use bool. | |||||
{ | { | ||||
struct rl_q_entry *entry; | struct rl_q_entry *entry; | ||||
struct thread *td; | struct thread *td; | ||||
MPASS(lock != NULL && ilk != NULL); | MPASS(lock != NULL && ilk != NULL); | ||||
td = curthread; | td = curthread; | ||||
if (td->td_rlqe != NULL) { | if (td->td_rlqe != NULL) { | ||||
Show All 9 Lines | rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode, | ||||
mtx_lock(ilk); | mtx_lock(ilk); | ||||
/* | /* | ||||
* XXXKIB TODO. Check that a thread does not try to enqueue a | * XXXKIB TODO. Check that a thread does not try to enqueue a | ||||
* lock that is incompatible with another request from the same | * lock that is incompatible with another request from the same | ||||
* thread. | * thread. | ||||
*/ | */ | ||||
TAILQ_INSERT_TAIL(&lock->rl_waiters, entry, rl_q_link); | TAILQ_INSERT_TAIL(&lock->rl_waiters, entry, rl_q_link); | ||||
/* | |||||
* If rl_currdep == NULL, there is no entry waiting for a conflicting | |||||
* range to be resolved, so set rl_currdep to this entry. If there is | |||||
* no conflicting entry for this entry, rl_currdep will be set back to | |||||
* NULL by rangelock_calc_block(). | |||||
*/ | |||||
if (lock->rl_currdep == NULL) | if (lock->rl_currdep == NULL) | ||||
lock->rl_currdep = entry; | lock->rl_currdep = entry; | ||||
rangelock_calc_block(lock); | rangelock_calc_block(lock); | ||||
while (!(entry->rl_q_flags & RL_LOCK_GRANTED)) | while (!(entry->rl_q_flags & RL_LOCK_GRANTED)) { | ||||
if (trylock != 0) { | |||||
/* | |||||
* If rl_currdep is this entry, rl_currdep needs to | |||||
* be set to the next entry in the rl_waiters list. | |||||
* However, since this entry is the last entry in the | |||||
* list, the next entry is NULL. | |||||
*/ | |||||
Done Inline ActionsYou should assert that the entry is indeed last, i..e. that its next is NULL. kib: You should assert that the entry is indeed last, i..e. that its next is NULL. | |||||
if (lock->rl_currdep == entry) { | |||||
KASSERT(TAILQ_NEXT(lock->rl_currdep, | |||||
rl_q_link) == NULL, | |||||
Done Inline ActionsI do not think that unlock_locked() is needed. Any non-granted entry can be removed from the list directly, and its removal cannot affect the list of currently granted range locks because it was not granted. kib: I do not think that unlock_locked() is needed. Any non-granted entry can be removed from the… | |||||
Done Inline ActionsI agree that the rangelock_calc_block() call in rangelock_unlock_locked() is a no op rmacklem: I agree that the rangelock_calc_block() call in rangelock_unlock_locked() is a no op
in this… | |||||
("rangelock_enqueue: next entry not NULL")); | |||||
lock->rl_currdep = NULL; | |||||
} | |||||
/* | |||||
* For this case, the range is not actually locked | |||||
* yet, but removal from the list requires the same | |||||
* steps, except for not doing a rangelock_calc_block() | |||||
* call, since rangelock_calc_block() was called above. | |||||
*/ | |||||
rangelock_unlock_locked(lock, entry, ilk, 0); | |||||
Not Done Inline ActionsI think that all code inside if (trylock) {... from start up to the rangelock_unlock_locked() call (except the call itself) would be more naturally placed in the rangelock_unlock_locked() itself. In _unlock_locked(), you would call rangelock_calc_block() if trylock is true, and do reset of rl_currdep as needed, otherwise. kib: I think that all code inside `if (trylock) {...` from start up to the rangelock_unlock_locked()… | |||||
return (NULL); | |||||
} | |||||
msleep(entry, ilk, 0, "range", 0); | msleep(entry, ilk, 0, "range", 0); | ||||
} | |||||
mtx_unlock(ilk); | mtx_unlock(ilk); | ||||
return (entry); | return (entry); | ||||
} | } | ||||
void * | void * | ||||
rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk) | rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk) | ||||
{ | { | ||||
return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk)); | return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk, 0)); | ||||
} | } | ||||
void * | void * | ||||
rangelock_rlock_trylock(struct rangelock *lock, off_t start, off_t end, | |||||
Not Done Inline ActionsI suggest rangelock_tryrlock() name. kib: I suggest rangelock_tryrlock() name. | |||||
struct mtx *ilk) | |||||
{ | |||||
return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk, 1)); | |||||
Done Inline ActionsI disagree with the addition of non-sleepable flag. This would make trylock fail due to issue not related to the range lock state. I do not this that a right semantic for rangelock try is to not ever sleep, it should be only 'do not sleep waiting for granting the rangelock'. kib: I disagree with the addition of non-sleepable flag. This would make trylock fail due to issue… | |||||
Done Inline ActionsYes. I was thinking "nonblock" should mean doesn't sleep, but "trylock" means rmacklem: Yes. I was thinking "nonblock" should mean doesn't sleep, but "trylock" means
don't sleep for… | |||||
} | |||||
void * | |||||
rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk) | rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk) | ||||
{ | { | ||||
return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk)); | return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk, 0)); | ||||
} | |||||
void * | |||||
rangelock_wlock_trylock(struct rangelock *lock, off_t start, off_t end, | |||||
Not Done Inline Actionsrangelock_trywlock() kib: rangelock_trywlock() | |||||
struct mtx *ilk) | |||||
{ | |||||
return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk, 1)); | |||||
} | } |
Reviewer note (kib): Please use bool and true/false.