Changeset View
Changeset View
Standalone View
Standalone View
sys/compat/linux/linux_futex.c
Show First 20 Lines • Show All 102 Lines • ▼ Show 20 Lines | |||||
LIN_SDT_PROBE_DEFINE1(futex, release_futexes, copyin_error, "int"); | LIN_SDT_PROBE_DEFINE1(futex, release_futexes, copyin_error, "int"); | ||||
#define FUTEX_SHARED 0x8 /* shared futex */ | #define FUTEX_SHARED 0x8 /* shared futex */ | ||||
#define GET_SHARED(a) (a->flags & FUTEX_SHARED) ? AUTO_SHARE : THREAD_SHARE | #define GET_SHARED(a) (a->flags & FUTEX_SHARED) ? AUTO_SHARE : THREAD_SHARE | ||||
static int futex_atomic_op(struct thread *, int, uint32_t *); | static int futex_atomic_op(struct thread *, int, uint32_t *); | ||||
static int handle_futex_death(struct thread *td, struct linux_emuldata *, | static int handle_futex_death(struct thread *td, struct linux_emuldata *, | ||||
uint32_t *, unsigned int); | uint32_t *, unsigned int, bool); | ||||
static int fetch_robust_entry(struct linux_robust_list **, | static int fetch_robust_entry(struct linux_robust_list **, | ||||
struct linux_robust_list **, unsigned int *); | struct linux_robust_list **, unsigned int *); | ||||
struct linux_futex_args { | struct linux_futex_args { | ||||
uint32_t *uaddr; | uint32_t *uaddr; | ||||
int32_t op; | int32_t op; | ||||
uint32_t flags; | uint32_t flags; | ||||
bool clockrt; | bool clockrt; | ||||
▲ Show 20 Lines • Show All 859 Lines • ▼ Show 20 Lines | LIN_SDT_PROBE1(futex, linux_get_robust_list, copyout_error, | ||||
error); | error); | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
handle_futex_death(struct thread *td, struct linux_emuldata *em, uint32_t *uaddr, | handle_futex_death(struct thread *td, struct linux_emuldata *em, uint32_t *uaddr, | ||||
unsigned int pi) | unsigned int pi, bool pending_op) | ||||
{ | { | ||||
uint32_t uval, nval, mval; | uint32_t uval, nval, mval; | ||||
int error; | int error; | ||||
/* Check that futex address is a 32bit aligned. */ | /* Check that futex address is a 32bit aligned. */ | ||||
if (!__is_aligned(uaddr, sizeof(uint32_t))) | if (!__is_aligned(uaddr, sizeof(uint32_t))) | ||||
return (EINVAL); | return (EINVAL); | ||||
retry: | retry: | ||||
error = fueword32(uaddr, &uval); | error = fueword32(uaddr, &uval); | ||||
if (error != 0) | if (error != 0) | ||||
return (EFAULT); | return (EFAULT); | ||||
/* | |||||
* Special case for regular (non PI) futexes. The unlock path in | |||||
* user space has two race scenarios: | |||||
* | |||||
* 1. The unlock path releases the user space futex value and | |||||
* before it can execute the futex() syscall to wake up | |||||
* waiters it is killed. | |||||
* | |||||
* 2. A woken up waiter is killed before it can acquire the | |||||
* futex in user space. | |||||
* | |||||
* In both cases the TID validation below prevents a wakeup of | |||||
* potential waiters which can cause these waiters to block | |||||
* forever. | |||||
* | |||||
* In both cases it is safe to attempt waking up a potential | |||||
* waiter without touching the user space futex value and trying | |||||
* to set the OWNER_DIED bit. | |||||
*/ | |||||
if (pending_op && !pi && !uval) { | |||||
(void)futex_wake(td, uaddr, 1, true); | |||||
return (0); | |||||
} | |||||
if ((uval & FUTEX_TID_MASK) == em->em_tid) { | if ((uval & FUTEX_TID_MASK) == em->em_tid) { | ||||
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; | mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; | ||||
error = casueword32(uaddr, uval, &nval, mval); | error = casueword32(uaddr, uval, &nval, mval); | ||||
if (error == -1) | if (error == -1) | ||||
return (EFAULT); | return (EFAULT); | ||||
if (error == 1) { | if (error == 1) { | ||||
error = thread_check_susp(td, false); | error = thread_check_susp(td, false); | ||||
if (error != 0) | if (error != 0) | ||||
Show All 29 Lines | fetch_robust_entry(struct linux_robust_list **entry, | ||||
} | } | ||||
*entry = (void *)(uentry & ~1UL); | *entry = (void *)(uentry & ~1UL); | ||||
*pi = uentry & 1; | *pi = uentry & 1; | ||||
return (0); | return (0); | ||||
} | } | ||||
/*
 * Tags for handle_futex_death()'s pending_op argument: whether the futex
 * being handled came from the head's pending_list slot (a lock the thread
 * was in the middle of acquiring/releasing) or from the regular robust list.
 */
#define	LINUX_HANDLE_DEATH_PENDING	true
#define	LINUX_HANDLE_DEATH_LIST	false

/*
 * This walks the list of robust futexes releasing them.
 *
 * Called at thread exit: walk the user-space robust-futex list registered
 * via set_robust_list and hand each entry to handle_futex_death() so that
 * any lock still held by the dying thread is released (OWNER_DIED) and
 * waiters are woken.  All list pointers and the offset live in user memory
 * and are fetched with copyin/fetch_robust_entry; any fault aborts the walk
 * silently, matching Linux behavior for a corrupted list.
 */
void
release_futexes(struct thread *td, struct linux_emuldata *em)
{
	struct linux_robust_list_head *head = NULL;
	struct linux_robust_list *entry, *next_entry, *pending;
	/*
	 * limit bounds the walk (the user list could be cyclic or
	 * maliciously long); pi/next_pi/pip carry the per-entry PI bit
	 * encoded in the low bit of each user-space pointer.
	 */
	unsigned int limit = 2048, pi, next_pi, pip;
	uint32_t *uaddr;
	l_long futex_offset;
	int rc, error;

	head = em->robust_futexes;
	if (head == NULL)
		return;

	if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
		return;

	/* Offset from each list entry to its futex word, set by user space. */
	error = copyin(&head->futex_offset, &futex_offset,
	    sizeof(futex_offset));
	if (error) {
		LIN_SDT_PROBE1(futex, release_futexes, copyin_error, error);
		return;
	}

	if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
		return;

	while (entry != &head->list) {
		/*
		 * Fetch the next entry before handling the current one:
		 * handle_futex_death() may wake another thread that then
		 * unlinks/reuses this entry in user memory.
		 */
		rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);

		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice.
		 */
		if (entry != pending) {
			uaddr = (uint32_t *)((caddr_t)entry + futex_offset);
			if (handle_futex_death(td, em, uaddr, pi,
			    LINUX_HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;

		entry = next_entry;
		pi = next_pi;

		/* Bail out after 2048 entries to bound a corrupt/cyclic list. */
		if (!--limit)
			break;

		/* Yield between entries; the list lives in pageable memory. */
		sched_relinquish(curthread);
	}

	/*
	 * Handle the pending slot last; result deliberately ignored, as
	 * there is no one left to report a failure to.
	 */
	if (pending) {
		uaddr = (uint32_t *)((caddr_t)pending + futex_offset);
		(void)handle_futex_death(td, em, uaddr, pip,
		    LINUX_HANDLE_DEATH_PENDING);
	}
}