Index: sys/kern/kern_umtx.c
===================================================================
--- sys/kern/kern_umtx.c
+++ sys/kern/kern_umtx.c
@@ -219,7 +219,11 @@
 	struct timespec	end;
 };
 
-#ifdef COMPAT_FREEBSD32
+#if defined(COMPAT_FREEBSD32)
+#define NEED_UMTX32
+#endif
+
+#ifdef NEED_UMTX32
 struct umutex32 {
 	volatile __lwpid_t	m_owner;	/* Owner of the mutex */
 	__uint32_t		m_flags;	/* Flags of the mutex */
@@ -234,6 +238,18 @@
     __offsetof(struct umutex32, m_spare[0]), "m_spare32");
 #endif
 
+struct _umtx_copyops {
+	int	(*copyin_timeout)(const void *addr, struct timespec *tsp);
+	int	(*copyin_umtx_time)(const void *addr, size_t size,
+	    struct _umtx_time *tp);
+	int	(*copyin_robust_lists)(const void *addr, size_t size,
+	    struct umtx_robust_lists_params *rbp);
+	int	(*copyout_timeout)(void *addr, size_t size,
+	    struct timespec *tsp);
+	size_t	timespec_sz;
+	size_t	umtx_time_sz;
+};
+
 int umtx_shm_vnobj_persistent = 0;
 SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
     &umtx_shm_vnobj_persistent, 0,
@@ -3434,14 +3450,16 @@
 }
 
 static int
-__umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_unimpl(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (EOPNOTSUPP);
 }
 
 static int
-__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wait(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time timeout, *tm_p;
 	int error;
@@ -3449,7 +3467,7 @@
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3459,7 +3477,8 @@
 }
 
 static int
-__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time timeout, *tm_p;
 	int error;
@@ -3467,7 +3486,7 @@
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3477,7 +3496,8 @@
 }
 
 static int
-__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	int error;
@@ -3485,7 +3505,7 @@
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3495,7 +3515,8 @@
 }
 
 static int
-__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wake(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (kern_umtx_wake(td, uap->obj, uap->val, 0));
@@ -3503,7 +3524,8 @@
 
 #define BATCH_SIZE	128
 static int
-__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 	char *uaddrs[BATCH_SIZE], **upp;
 	int count, error, i, pos, tocopy;
@@ -3524,14 +3546,16 @@
 }
 
 static int
-__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (kern_umtx_wake(td, uap->obj, uap->val, 1));
 }
 
 static int
-__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	int error;
@@ -3540,7 +3564,7 @@
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3550,14 +3574,16 @@
 }
 
 static int
-__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY));
 }
 
 static int
-__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	int error;
@@ -3566,7 +3592,7 @@
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3576,28 +3602,32 @@
 }
 
 static int
-__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_wake_umutex(td, uap->obj));
 }
 
 static int
-__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_unlock_umutex(td, uap->obj, false));
 }
 
 static int
-__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1));
 }
 
 static int
-__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct timespec *ts, timeout;
 	int error;
@@ -3606,7 +3636,7 @@
 	if (uap->uaddr2 == NULL)
 		ts = NULL;
 	else {
-		error = umtx_copyin_timeout(uap->uaddr2, &timeout);
+		error = ops->copyin_timeout(uap->uaddr2, &timeout);
 		if (error != 0)
 			return (error);
 		ts = &timeout;
@@ -3615,21 +3645,24 @@
 }
 
 static int
-__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_cv_signal(td, uap->obj));
 }
 
 static int
-__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_cv_broadcast(td, uap->obj));
 }
 
 static int
-__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time timeout;
 	int error;
@@ -3638,7 +3671,7 @@
 	if (uap->uaddr2 == NULL) {
 		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
 	} else {
-		error = umtx_copyin_umtx_time(uap->uaddr2,
+		error = ops->copyin_umtx_time(uap->uaddr2,
 		    (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3648,7 +3681,8 @@
 }
 
 static int
-__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time timeout;
 	int error;
@@ -3657,7 +3691,7 @@
 	if (uap->uaddr2 == NULL) {
 		error = do_rw_wrlock(td, uap->obj, 0);
 	} else {
-		error = umtx_copyin_umtx_time(uap->uaddr2,
+		error = ops->copyin_umtx_time(uap->uaddr2,
 		    (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3668,7 +3702,8 @@
 }
 
 static int
-__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_rw_unlock(td, uap->obj));
@@ -3676,7 +3711,8 @@
 
 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
 static int
-__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	int error;
@@ -3685,7 +3721,7 @@
 	if (uap->uaddr2 == NULL)
 		tm_p = NULL;
 	else {
-		error = umtx_copyin_umtx_time(
+		error = ops->copyin_umtx_time(
 		    uap->uaddr2, (size_t)uap->uaddr1, &timeout);
 		if (error != 0)
 			return (error);
@@ -3695,7 +3731,8 @@
 }
 
 static int
-__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_sem_wake(td, uap->obj));
@@ -3703,14 +3740,16 @@
 #endif
 
 static int
-__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_wake2_umutex(td, uap->obj, uap->val));
 }
 
 static int
-__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct _umtx_time *tm_p, timeout;
 	size_t uasize;
@@ -3722,7 +3761,7 @@
 		tm_p = NULL;
 	} else {
 		uasize = (size_t)uap->uaddr1;
-		error = umtx_copyin_umtx_time(uap->uaddr2, uasize, &timeout);
+		error = ops->copyin_umtx_time(uap->uaddr2, uasize, &timeout);
 		if (error != 0)
 			return (error);
 		tm_p = &timeout;
@@ -3730,10 +3769,10 @@
 	error = do_sem2_wait(td, uap->obj, tm_p);
 	if (error == EINTR && uap->uaddr2 != NULL &&
 	    (timeout._flags & UMTX_ABSTIME) == 0 &&
-	    uasize >= sizeof(struct _umtx_time) + sizeof(struct timespec)) {
-		error = copyout(&timeout._timeout,
-		    (struct _umtx_time *)uap->uaddr2 + 1,
-		    sizeof(struct timespec));
+	    uasize >= ops->umtx_time_sz + ops->timespec_sz) {
+		error = ops->copyout_timeout(
+		    (void *)((uintptr_t)uap->uaddr2 + ops->umtx_time_sz),
+		    uasize - ops->umtx_time_sz, &timeout._timeout);
 		if (error == 0) {
 			error = EINTR;
 		}
@@ -3743,7 +3782,8 @@
 }
 
 static int
-__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (do_sem2_wake(td, uap->obj));
@@ -4050,7 +4090,8 @@
 }
 
 static int
-__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops __unused)
 {
 
 	return (umtx_shm(td, uap->uaddr1, uap->val));
@@ -4067,76 +4108,54 @@
 }
 
 static int
-__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap)
+__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *ops)
 {
 	struct umtx_robust_lists_params rb;
 	int error;
 
-	if (uap->val > sizeof(rb))
-		return (EINVAL);
 	bzero(&rb, sizeof(rb));
-	error = copyin(uap->uaddr1, &rb, uap->val);
+	error = ops->copyin_robust_lists(uap->uaddr1, uap->val, &rb);
 	if (error != 0)
 		return (error);
 	return (umtx_robust_lists(td, &rb));
 }
 
-typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);
+static int
+umtx_copyin_robust_lists(const void *addr, size_t size,
+    struct umtx_robust_lists_params *rb)
+{
 
-static const _umtx_op_func op_table[] = {
-	[UMTX_OP_RESERVED0]	= __umtx_op_unimpl,
-	[UMTX_OP_RESERVED1]	= __umtx_op_unimpl,
-	[UMTX_OP_WAIT]		= __umtx_op_wait,
-	[UMTX_OP_WAKE]		= __umtx_op_wake,
-	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
-	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex,
-	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
-	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
-	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait,
-	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
-	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
-	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_uint,
-	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock,
-	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock,
-	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
-	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
-	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
-	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex,
-	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
-#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
-	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait,
-	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
-#else
-	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
-	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
-#endif
-	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private,
-	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
-	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
-	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
-	[UMTX_OP_SHM]		= __umtx_op_shm,
-	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
-};
+	if (size > sizeof(*rb))
+		return (EINVAL);
+	return (copyin(addr, rb, size));
+}
 
-int
-sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
+static int
+umtx_copyout_timeout(void *addr, size_t sz, struct timespec *tsp)
 {
-	if ((unsigned)uap->op < nitems(op_table))
-		return (*op_table[uap->op])(td, uap);
-	return (EINVAL);
-}
+	KASSERT(sz >= sizeof(struct timespec),
+	    ("_umtx_copyops specifies incorrect sizes"));
 
-#ifdef COMPAT_FREEBSD32
+	return (copyout(tsp, addr, sizeof(struct timespec)));
+}
+#ifdef NEED_UMTX32
 struct umtx_time32 {
 	struct timespec32	timeout;
 	uint32_t		flags;
 	uint32_t		clockid;
 };
 
+struct umtx_robust_lists_params_compat32 {
+	uint32_t	robust_list_offset;
+	uint32_t	robust_priv_list_offset;
+	uint32_t	robust_inact_offset;
+};
+
 static inline int
-umtx_copyin_timeout32(void *addr, struct timespec *tsp)
+umtx_copyin_timeout32(const void *addr, struct timespec *tsp)
 {
 	struct timespec32 ts32;
 	int error;
@@ -4180,285 +4199,131 @@
 }
 
 static int
-__umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
-	int error;
-
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
-}
-
-static int
-__umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_lock_umutex(td, uap->obj, tm_p, 0));
-}
-
-static int
-__umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT));
-}
-
-static int
-__umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct timespec *ts, timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL)
-		ts = NULL;
-	else {
-		error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
-		if (error != 0)
-			return (error);
-		ts = &timeout;
-	}
-	return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
-}
-
-static int
-__umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL) {
-		error = do_rw_rdlock(td, uap->obj, uap->val, 0);
-	} else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
-	}
-	return (error);
-}
-
-static int
-__umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
+umtx_copyin_robust_lists32(const void *addr, size_t size,
+    struct umtx_robust_lists_params *rbp)
 {
-	struct _umtx_time timeout;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL) {
-		error = do_rw_wrlock(td, uap->obj, 0);
-	} else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		error = do_rw_wrlock(td, uap->obj, &timeout);
-	}
-	return (error);
-}
-
-static int
-__umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
-	int error;
-
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(
-		    uap->uaddr2, (size_t)uap->uaddr1,&timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
-}
-
-#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
-static int
-__umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct _umtx_time *tm_p, timeout;
+	struct umtx_robust_lists_params_compat32 rb32;
 	int error;
 
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL)
-		tm_p = NULL;
-	else {
-		error = umtx_copyin_umtx_time32(uap->uaddr2,
-		    (size_t)uap->uaddr1, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	return (do_sem_wait(td, uap->obj, tm_p));
+	if (size > sizeof(rb32))
+		return (EINVAL);
+	bzero(&rb32, sizeof(rb32));
+	error = copyin(addr, &rb32, size);
+	if (error != 0)
+		return (error);
+	rbp->robust_list_offset = rb32.robust_list_offset;
+	rbp->robust_priv_list_offset = rb32.robust_priv_list_offset;
+	rbp->robust_inact_offset = rb32.robust_inact_offset;
+	return (0);
 }
-#endif
 
 static int
-__umtx_op_sem2_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
+umtx_copyout_timeout32(void *addr, size_t sz, struct timespec *tsp)
 {
-	struct _umtx_time *tm_p, timeout;
-	size_t uasize;
-	int error;
-
-	/* Allow a null timespec (wait forever). */
-	if (uap->uaddr2 == NULL) {
-		uasize = 0;
-		tm_p = NULL;
-	} else {
-		uasize = (size_t)uap->uaddr1;
-		error = umtx_copyin_umtx_time32(uap->uaddr2, uasize, &timeout);
-		if (error != 0)
-			return (error);
-		tm_p = &timeout;
-	}
-	error = do_sem2_wait(td, uap->obj, tm_p);
-	if (error == EINTR && uap->uaddr2 != NULL &&
-	    (timeout._flags & UMTX_ABSTIME) == 0 &&
-	    uasize >= sizeof(struct umtx_time32) + sizeof(struct timespec32)) {
-		struct timespec32 remain32 = {
-			.tv_sec = timeout._timeout.tv_sec,
-			.tv_nsec = timeout._timeout.tv_nsec
-		};
-		error = copyout(&remain32,
-		    (struct umtx_time32 *)uap->uaddr2 + 1,
-		    sizeof(struct timespec32));
-		if (error == 0) {
-			error = EINTR;
-		}
-	}
+	struct timespec32 remain32 = {
+		.tv_sec = tsp->tv_sec,
+		.tv_nsec = tsp->tv_nsec,
+	};
 
-	return (error);
-}
+	KASSERT(sz >= sizeof(struct timespec32),
+	    ("_umtx_copyops specifies incorrect sizes"));
 
-static int
-__umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
-{
-	uint32_t uaddrs[BATCH_SIZE], **upp;
-	int count, error, i, pos, tocopy;
-
-	upp = (uint32_t **)uap->obj;
-	error = 0;
-	for (count = uap->val, pos = 0; count > 0; count -= tocopy,
-	    pos += tocopy) {
-		tocopy = MIN(count, BATCH_SIZE);
-		error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t));
-		if (error != 0)
-			break;
-		for (i = 0; i < tocopy; ++i)
-			kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
-			    INT_MAX, 1);
-		maybe_yield();
-	}
-	return (error);
+	return (copyout(&remain32, addr, sizeof(struct timespec32)));
 }
+#endif /* NEED_UMTX32 */
 
-struct umtx_robust_lists_params_compat32 {
-	uint32_t	robust_list_offset;
-	uint32_t	robust_priv_list_offset;
-	uint32_t	robust_inact_offset;
-};
-
-static int
-__umtx_op_robust_lists_compat32(struct thread *td, struct _umtx_op_args *uap)
-{
-	struct umtx_robust_lists_params rb;
-	struct umtx_robust_lists_params_compat32 rb32;
-	int error;
-
-	if (uap->val > sizeof(rb32))
-		return (EINVAL);
-	bzero(&rb, sizeof(rb));
-	bzero(&rb32, sizeof(rb32));
-	error = copyin(uap->uaddr1, &rb32, uap->val);
-	if (error != 0)
-		return (error);
-	rb.robust_list_offset = rb32.robust_list_offset;
-	rb.robust_priv_list_offset = rb32.robust_priv_list_offset;
-	rb.robust_inact_offset = rb32.robust_inact_offset;
-	return (umtx_robust_lists(td, &rb));
-}
+typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap,
+    const struct _umtx_copyops *umtx_ops);
 
-static const _umtx_op_func op_table_compat32[] = {
+static const _umtx_op_func op_table[] = {
 	[UMTX_OP_RESERVED0]	= __umtx_op_unimpl,
 	[UMTX_OP_RESERVED1]	= __umtx_op_unimpl,
-	[UMTX_OP_WAIT]		= __umtx_op_wait_compat32,
+	[UMTX_OP_WAIT]		= __umtx_op_wait,
 	[UMTX_OP_WAKE]		= __umtx_op_wake,
 	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
-	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex_compat32,
+	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex,
 	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
 	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
-	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait_compat32,
+	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait,
 	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
 	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
-	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_compat32,
-	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock_compat32,
-	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock_compat32,
+	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_uint,
+	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock,
+	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock,
 	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
-	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private_compat32,
+	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
 	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
-	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex_compat32,
+	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex,
 	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
-	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait_compat32,
+	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait,
 	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
 #else
 	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
 	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
 #endif
-	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private32,
+	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private,
 	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
-	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait_compat32,
+	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
 	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
 	[UMTX_OP_SHM]		= __umtx_op_shm,
-	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists_compat32,
+	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
+};
+
+struct _umtx_copyops umtx_native_ops = {
+	.copyin_timeout = umtx_copyin_timeout,
+	.copyin_umtx_time = umtx_copyin_umtx_time,
+	.copyin_robust_lists = umtx_copyin_robust_lists,
+	.copyout_timeout = umtx_copyout_timeout,
+	.timespec_sz = sizeof(struct timespec),
+	.umtx_time_sz = sizeof(struct _umtx_time),
 };
 
+#ifdef NEED_UMTX32
+struct _umtx_copyops umtx_native_ops32 = {
+	.copyin_timeout = umtx_copyin_timeout32,
+	.copyin_umtx_time = umtx_copyin_umtx_time32,
+	.copyin_robust_lists = umtx_copyin_robust_lists32,
+	.copyout_timeout = umtx_copyout_timeout32,
+	.timespec_sz = sizeof(struct timespec32),
+	.umtx_time_sz = sizeof(struct umtx_time32),
+};
+#endif
+
+static int
+kern__umtx_op(struct thread *td, void *obj, int op, unsigned long val,
+    void *uaddr1, void *uaddr2, struct _umtx_copyops *ops)
+{
+	struct _umtx_op_args uap = {
+		.obj = obj,
+		.op = op,
+		.val = val,
+		.uaddr1 = uaddr1,
+		.uaddr2 = uaddr2
+	};
+
+	if ((uap.op >= nitems(op_table)))
+		return (EINVAL);
+	return (*op_table[uap.op])(td, &uap, ops);
+}
+
+int
+sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
+{
+	struct _umtx_copyops *umtx_ops;
+
+	umtx_ops = &umtx_native_ops;
+	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
+	    uap->uaddr2, umtx_ops));
+}
+
+#ifdef COMPAT_FREEBSD32
 int
 freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
 {
 
-	if ((unsigned)uap->op < nitems(op_table_compat32)) {
-		return (*op_table_compat32[uap->op])(td,
-		    (struct _umtx_op_args *)uap);
-	}
-	return (EINVAL);
+	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr,
+	    uap->uaddr2, &umtx_native_ops32));
 }
 #endif
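
For readers unfamiliar with the pattern the patch introduces, the standalone C sketch below illustrates the copyops-style indirection in miniature: each operation handler takes a structure of copyin function pointers plus the ABI-specific sizes, so a single handler body can serve both the native and the 32-bit compat entry points. This is an editor-added, userland-compilable illustration, not kernel code; the type and function names (copyops, timespec_native, timespec_compat32, op_wait, and so on) are hypothetical stand-ins rather than the names used in kern_umtx.c.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for a native and a 32-bit compat timeout layout. */
struct timespec_native   { int64_t tv_sec; int64_t tv_nsec; };
struct timespec_compat32 { int32_t tv_sec; int32_t tv_nsec; };

struct copyops {
	/* Decode a caller-supplied timeout into the native representation. */
	int	(*copyin_timeout)(const void *addr, struct timespec_native *tsp);
	size_t	timespec_sz;
};

static int
copyin_timeout_native(const void *addr, struct timespec_native *tsp)
{
	memcpy(tsp, addr, sizeof(*tsp));	/* stands in for copyin(9) */
	return (0);
}

static int
copyin_timeout_compat32(const void *addr, struct timespec_native *tsp)
{
	struct timespec_compat32 ts32;

	memcpy(&ts32, addr, sizeof(ts32));
	tsp->tv_sec = ts32.tv_sec;		/* widen to the native layout */
	tsp->tv_nsec = ts32.tv_nsec;
	return (0);
}

static const struct copyops native_ops = {
	.copyin_timeout = copyin_timeout_native,
	.timespec_sz = sizeof(struct timespec_native),
};

static const struct copyops compat32_ops = {
	.copyin_timeout = copyin_timeout_compat32,
	.timespec_sz = sizeof(struct timespec_compat32),
};

/* One handler body serves both ABIs; only 'ops' differs per caller. */
static int
op_wait(const void *utimeout, const struct copyops *ops)
{
	struct timespec_native ts;
	int error;

	error = ops->copyin_timeout(utimeout, &ts);
	if (error != 0)
		return (error);
	printf("waiting %lld.%09lld s (caller passed %zu bytes)\n",
	    (long long)ts.tv_sec, (long long)ts.tv_nsec, ops->timespec_sz);
	return (0);
}

int
main(void)
{
	struct timespec_native tn = { 1, 500000000 };
	struct timespec_compat32 t32 = { 2, 250000000 };

	op_wait(&tn, &native_ops);	/* what a native entry point would do */
	op_wait(&t32, &compat32_ops);	/* what a 32-bit compat entry point would do */
	return (0);
}

This mirrors the design choice in the patch: instead of maintaining a duplicate *_compat32 handler and a second op table, the dispatch table stays single and the ABI differences are confined to the small copyin/copyout helpers selected by the caller.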