diff --git a/lib/libc/sys/_umtx_op.2 b/lib/libc/sys/_umtx_op.2
--- a/lib/libc/sys/_umtx_op.2
+++ b/lib/libc/sys/_umtx_op.2
@@ -1270,6 +1270,17 @@
 See
 .Sx ROBUST UMUTEXES
 subsection for details.
+.It Dv UMTX_OP_GET_MIN_TIMEOUT
+Writes out the current value of the minimal umtx operations timeout,
+in nanoseconds, into the long integer variable pointed to by
+.Fa uaddr1 .
+.It Dv UMTX_OP_SET_MIN_TIMEOUT
+Sets the minimal amount of time, in nanoseconds, the thread is required
+to sleep for umtx operations specifying a timeout using absolute clocks.
+The value is taken from the
+.Fa val
+argument of the call.
+Zero means no minimum.
 .El
 .Pp
 The
diff --git a/lib/libthr/libthr.3 b/lib/libthr/libthr.3
--- a/lib/libthr/libthr.3
+++ b/lib/libthr/libthr.3
@@ -196,6 +196,12 @@
 threads are inserted at the head of the sleep queue, instead of its tail.
 Bigger values reduce the frequency of the FIFO discipline.
 The value must be between 0 and 255.
+.It Dv LIBPTHREAD_UMTX_MIN_TIMEOUT
+The minimal amount of time, in nanoseconds, the thread is required to sleep
+for pthread operations specifying a timeout.
+If the operation requests a timeout less than the value provided,
+it is silently increased to the value.
+The value of zero means no minimum (default).
 .Pp
 .El
 The following
diff --git a/lib/libthr/thread/thr_init.c b/lib/libthr/thread/thr_init.c
--- a/lib/libthr/thread/thr_init.c
+++ b/lib/libthr/thread/thr_init.c
@@ -522,6 +522,17 @@
 		if (env)
 			_thr_queuefifo = atoi(env);
 		TAILQ_INIT(&_thr_atfork_list);
+		env = getenv("LIBPTHREAD_UMTX_MIN_TIMEOUT");
+		if (env) {
+			char *endptr;
+			long mint;
+
+			mint = strtol(env, &endptr, 0);
+			if (*endptr == '\0' && mint >= 0) {
+				_umtx_op(NULL, UMTX_OP_SET_MIN_TIMEOUT,
+				    mint, NULL, NULL);
+			}
+		}
 	}
 	init_once = 1;
 }
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -101,7 +101,7 @@
     "struct proc KBI p_filemon");
 _Static_assert(offsetof(struct proc, p_comm) == 0x3e0,
     "struct proc KBI p_comm");
-_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
+_Static_assert(offsetof(struct proc, p_emuldata) == 0x4d0,
     "struct proc KBI p_emuldata");
 #endif
 #ifdef __i386__
@@ -121,7 +121,7 @@
     "struct proc KBI p_filemon");
 _Static_assert(offsetof(struct proc, p_comm) == 0x284,
     "struct proc KBI p_comm");
-_Static_assert(offsetof(struct proc, p_emuldata) == 0x310,
+_Static_assert(offsetof(struct proc, p_emuldata) == 0x318,
     "struct proc KBI p_emuldata");
 #endif
 
diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c
--- a/sys/kern/kern_umtx.c
+++ b/sys/kern/kern_umtx.c
@@ -700,6 +700,24 @@
 	    (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
 }
 
+/*
+ * Enforce the per-process minimum sleep time for absolute timeouts:
+ * if the requested wakeup time is less than p_umtx_min_timeout into
+ * the future, push it out to now + p_umtx_min_timeout.
+ */
+static void
+umtx_abs_timeout_enforce_min(sbintime_t *sbt)
+{
+	sbintime_t when, mint;
+
+	mint = curproc->p_umtx_min_timeout;
+	if (__predict_false(mint != 0)) {
+		when = sbinuptime() + mint;
+		if (*sbt < when)
+			*sbt = when;
+	}
+}
+
 static int
 umtx_abs_timeout_getsbt(struct umtx_abs_timeout *timo, sbintime_t *sbt,
     int *flags)
@@ -739,6 +757,7 @@
 		return (0);
 	}
 	*sbt = bttosbt(bt);
+	umtx_abs_timeout_enforce_min(sbt);
 
 	/*
 	 * Check if the absolute time should be aligned to
@@ -4594,6 +4613,52 @@
 	return (0);
 }
 
+/*
+ * UMTX_OP_GET_MIN_TIMEOUT: copy the current minimum timeout for
+ * absolute-clock umtx sleeps, in nanoseconds, out to uaddr1.
+ */
+static int
+__umtx_op_get_min_timeout(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
+{
+	long val;
+	int error, val1;
+
+	val = sbttons(td->td_proc->p_umtx_min_timeout);
+	if (ops->compat32) {
+		val1 = (int)val;
+		error = copyout(&val1, uap->uaddr1, sizeof(val1));
+	} else {
+		error = copyout(&val, uap->uaddr1, sizeof(val));
+	}
+	return (error);
+}
+
+/*
+ * UMTX_OP_SET_MIN_TIMEOUT: set the minimum sleep time, in nanoseconds,
+ * for umtx operations with absolute-clock timeouts; zero disables it.
+ */
+static int
+__umtx_op_set_min_timeout(struct thread *td, struct _umtx_op_args *uap,
+    const struct umtx_copyops *ops)
+{
+	long val;
+
+	/*
+	 * A 32-bit process passes val zero-extended to 64 bits; truncate
+	 * it back so that negative values are rejected with EINVAL
+	 * instead of being accepted as huge timeouts.
+	 */
+	if (ops->compat32)
+		val = (int)uap->val;
+	else
+		val = uap->val;
+	if (val < 0)
+		return (EINVAL);
+	td->td_proc->p_umtx_min_timeout = nstosbt(val);
+	return (0);
+}
+
 #if defined(__i386__) || defined(__amd64__)
 /*
  * Provide the standard 32-bit definitions for x86, since native/compat32 use a
@@ -4816,6 +4881,8 @@
 	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
 	[UMTX_OP_SHM]		= __umtx_op_shm,
 	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
+	[UMTX_OP_GET_MIN_TIMEOUT] = __umtx_op_get_min_timeout,
+	[UMTX_OP_SET_MIN_TIMEOUT] = __umtx_op_set_min_timeout,
 };
 
 static const struct umtx_copyops umtx_native_ops = {
@@ -4990,6 +5057,8 @@
 		umtx_thread_cleanup(td);
 		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
 	}
+
+	p->p_umtx_min_timeout = 0;
 }
 
 /*
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -741,6 +741,7 @@
 	uint64_t p_elf_flags;	/* (x) ELF flags */
 	void *p_elf_brandinfo;	/* (x) Elf_Brandinfo, NULL for
 					   non ELF binaries. */
+	sbintime_t p_umtx_min_timeout;	/* Min. sleep for abs. umtx timeouts. */
 /* End area that is copied on creation. */
 #define	p_endcopy	p_xexit
 
diff --git a/sys/sys/umtx.h b/sys/sys/umtx.h
--- a/sys/sys/umtx.h
+++ b/sys/sys/umtx.h
@@ -103,6 +103,8 @@
 #define	UMTX_OP_SEM2_WAKE	24
 #define	UMTX_OP_SHM		25
 #define	UMTX_OP_ROBUST_LISTS	26
+#define	UMTX_OP_GET_MIN_TIMEOUT	27
+#define	UMTX_OP_SET_MIN_TIMEOUT	28
 
 /*
  * Flags for ops; the double-underbar convention must be maintained for future