diff --git a/share/man/man9/callout.9 b/share/man/man9/callout.9 index 9e0cf5e6dc5e..5c011a20d338 100644 --- a/share/man/man9/callout.9 +++ b/share/man/man9/callout.9 @@ -1,916 +1,913 @@ .\" $NetBSD: timeout.9,v 1.2 1996/06/23 22:32:34 pk Exp $ .\" .\" Copyright (c) 1996 The NetBSD Foundation, Inc. .\" All rights reserved. .\" .\" This code is derived from software contributed to The NetBSD Foundation .\" by Paul Kranenburg. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS .\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED .\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR .\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd December 13, 2019 +.Dd September 1, 2021 .Dt CALLOUT 9 .Os .Sh NAME .Nm callout_active , .Nm callout_deactivate , .Nm callout_async_drain , .Nm callout_drain , .Nm callout_init , .Nm callout_init_mtx , .Nm callout_init_rm , .Nm callout_init_rw , .Nm callout_pending , .Nm callout_reset , .Nm callout_reset_curcpu , .Nm callout_reset_on , .Nm callout_reset_sbt , .Nm callout_reset_sbt_curcpu , .Nm callout_reset_sbt_on , .Nm callout_schedule , .Nm callout_schedule_curcpu , .Nm callout_schedule_on , .Nm callout_schedule_sbt , .Nm callout_schedule_sbt_curcpu , .Nm callout_schedule_sbt_on , .Nm callout_stop , .Nm callout_when .Nd execute a function after a specified length of time .Sh SYNOPSIS .In sys/types.h .In sys/callout.h .Bd -literal typedef void callout_func_t (void *); .Ed .Ft int .Fn callout_active "struct callout *c" .Ft void .Fn callout_deactivate "struct callout *c" .Ft int .Fn callout_async_drain "struct callout *c" "callout_func_t *drain" .Ft int .Fn callout_drain "struct callout *c" .Ft void .Fn callout_init "struct callout *c" "int mpsafe" .Ft void .Fn callout_init_mtx "struct callout *c" "struct mtx *mtx" "int flags" .Ft void .Fn callout_init_rm "struct callout *c" "struct rmlock *rm" "int flags" .Ft void .Fn callout_init_rw "struct callout *c" "struct rwlock *rw" "int flags" .Ft int .Fn callout_pending "struct callout *c" .Ft int .Fo callout_reset .Fa "struct callout *c" .Fa "int ticks" .Fa "callout_func_t *func" .Fa "void *arg" .Fc .Ft int .Fo callout_reset_curcpu .Fa "struct callout *c" .Fa "int ticks" .Fa "callout_func_t *func" .Fa "void *arg" .Fc .Ft int .Fo callout_reset_on .Fa "struct callout *c" .Fa "int ticks" .Fa "callout_func_t *func" .Fa "void *arg" .Fa "int cpu" .Fc .Ft int .Fo callout_reset_sbt .Fa "struct callout *c" .Fa 
"sbintime_t sbt" .Fa "sbintime_t pr" .Fa "callout_func_t *func" .Fa "void *arg" .Fa "int flags" .Fc .Ft int .Fo callout_reset_sbt_curcpu .Fa "struct callout *c" .Fa "sbintime_t sbt" .Fa "sbintime_t pr" .Fa "callout_func_t *func" .Fa "void *arg" .Fa "int flags" .Fc .Ft int .Fo callout_reset_sbt_on .Fa "struct callout *c" .Fa "sbintime_t sbt" .Fa "sbintime_t pr" .Fa "callout_func_t *func" .Fa "void *arg" .Fa "int cpu" .Fa "int flags" .Fc .Ft int .Fn callout_schedule "struct callout *c" "int ticks" .Ft int .Fn callout_schedule_curcpu "struct callout *c" "int ticks" .Ft int .Fn callout_schedule_on "struct callout *c" "int ticks" "int cpu" .Ft int .Fo callout_schedule_sbt .Fa "struct callout *c" .Fa "sbintime_t sbt" .Fa "sbintime_t pr" .Fa "int flags" .Fc .Ft int .Fo callout_schedule_sbt_curcpu .Fa "struct callout *c" .Fa "sbintime_t sbt" .Fa "sbintime_t pr" .Fa "int flags" .Fc .Ft int .Fo callout_schedule_sbt_on .Fa "struct callout *c" .Fa "sbintime_t sbt" .Fa "sbintime_t pr" .Fa "int cpu" .Fa "int flags" .Fc .Ft int .Fn callout_stop "struct callout *c" .Ft sbintime_t .Fo callout_when .Fa "sbintime_t sbt" .Fa "sbintime_t precision" .Fa "int flags" .Fa "sbintime_t *sbt_res" .Fa "sbintime_t *precision_res" .Fc .Sh DESCRIPTION The .Nm callout API is used to schedule a call to an arbitrary function at a specific time in the future. Consumers of this API are required to allocate a callout structure .Pq struct callout for each pending function invocation. This structure stores state about the pending function invocation including the function to be called and the time at which the function should be invoked. Pending function calls can be cancelled or rescheduled to a different time. In addition, a callout structure may be reused to schedule a new function call after a scheduled call is completed. .Pp Callouts only provide a single-shot mode. If a consumer requires a periodic timer, it must explicitly reschedule each function call. This is normally done by rescheduling the subsequent call within the called function. .Pp Callout functions must not sleep. They may not acquire sleepable locks, wait on condition variables, perform blocking allocation requests, or invoke any other action that might sleep. .Pp Each callout structure must be initialized by .Fn callout_init , .Fn callout_init_mtx , .Fn callout_init_rm , or .Fn callout_init_rw before it is passed to any of the other callout functions. The .Fn callout_init function initializes a callout structure in .Fa c that is not associated with a specific lock. If the .Fa mpsafe argument is zero, the callout structure is not considered to be .Dq multi-processor safe ; and the Giant lock will be acquired before calling the callout function and released when the callout function returns. .Pp The .Fn callout_init_mtx , .Fn callout_init_rm , and .Fn callout_init_rw functions initialize a callout structure in .Fa c that is associated with a specific lock. The lock is specified by the .Fa mtx , .Fa rm , or .Fa rw parameter. The associated lock must be held while stopping or rescheduling the callout. The callout subsystem acquires the associated lock before calling the callout function and releases it after the function returns. If the callout was cancelled while the callout subsystem waited for the associated lock, the callout function is not called, and the associated lock is released. This ensures that stopping or rescheduling the callout will abort any previously scheduled invocation. 
.Pp -Only regular mutexes may be used with -.Fn callout_init_mtx ; -spin mutexes are not supported. A sleepable read-mostly lock .Po one initialized with the .Dv RM_SLEEPABLE flag .Pc may not be used with .Fn callout_init_rm . Similarly, other sleepable lock types such as .Xr sx 9 and .Xr lockmgr 9 cannot be used with callouts because sleeping is not permitted in the callout subsystem. .Pp These .Fa flags may be specified for .Fn callout_init_mtx , .Fn callout_init_rm , or .Fn callout_init_rw : .Bl -tag -width ".Dv CALLOUT_RETURNUNLOCKED" .It Dv CALLOUT_RETURNUNLOCKED The callout function will release the associated lock itself, so the callout subsystem should not attempt to unlock it after the callout function returns. .It Dv CALLOUT_SHAREDLOCK The lock is only acquired in read mode when running the callout handler. This flag is ignored by .Fn callout_init_mtx . .El .Pp The function .Fn callout_stop cancels a callout .Fa c if it is currently pending. If the callout is pending and successfully stopped, then .Fn callout_stop returns a value of one. If the callout is not set, or has already been serviced, then negative one is returned. If the callout is currently being serviced and cannot be stopped, then zero will be returned. If the callout is currently being serviced and cannot be stopped, and at the same time a next invocation of the same callout is also scheduled, then .Fn callout_stop unschedules the next run and returns zero. If the callout has an associated lock, then that lock must be held when this function is called. .Pp The function .Fn callout_async_drain is identical to .Fn callout_stop with one difference. When .Fn callout_async_drain returns zero it will arrange for the function .Fa drain to be called using the same argument given to the .Fn callout_reset function. If the callout has an associated lock, then that lock must be held when this function is called. Note that when stopping multiple callouts that use the same lock it is possible to get multiple returns of zero and multiple calls to the .Fa drain function, depending upon which CPUs the callouts are running on. The .Fa drain function itself is called from the context of the completing callout, i.e., softclock or hardclock, just like a callout itself. .Pp The function .Fn callout_drain is identical to .Fn callout_stop except that it will wait for the callout .Fa c to complete if it is already in progress. This function MUST NOT be called while holding any locks on which the callout might block, or deadlock will result. Note that if the callout subsystem has already begun processing this callout, then the callout function may be invoked before .Fn callout_drain returns. However, the callout subsystem does guarantee that the callout will be fully stopped before .Fn callout_drain returns. .Pp The .Fn callout_reset and .Fn callout_schedule function families schedule a future function invocation for callout .Fa c . If .Fa c already has a pending callout, it is cancelled before the new invocation is scheduled. These functions return a value of one if a pending callout was cancelled and zero if there was no pending callout. If the callout has an associated lock, then that lock must be held when any of these functions are called. .Pp The time at which the callout function will be invoked is determined by either the .Fa ticks argument or the .Fa sbt , .Fa pr , and .Fa flags arguments. When .Fa ticks is used, the callout is scheduled to execute after .Fa ticks Ns No /hz seconds.
Non-positive values of .Fa ticks are silently converted to the value .Sq 1 . .Pp The .Fa sbt , .Fa pr , and .Fa flags arguments provide more control over the scheduled time including support for higher resolution times, specifying the precision of the scheduled time, and setting an absolute deadline instead of a relative timeout. The callout is scheduled to execute in a time window which begins at the time specified in .Fa sbt and extends for the amount of time specified in .Fa pr . If .Fa sbt specifies a time in the past, the window is adjusted to start at the current time. A non-zero value for .Fa pr allows the callout subsystem to coalesce callouts scheduled close to each other into fewer timer interrupts, reducing processing overhead and power consumption. These .Fa flags may be specified to adjust the interpretation of .Fa sbt and .Fa pr : .Bl -tag -width ".Dv C_DIRECT_EXEC" .It Dv C_ABSOLUTE Handle the .Fa sbt argument as an absolute time since boot. By default, .Fa sbt is treated as a relative amount of time, similar to .Fa ticks . .It Dv C_DIRECT_EXEC Run the handler directly from hardware interrupt context instead of from the softclock thread. This reduces latency and overhead, but puts more constraints on the callout function. Callout functions run in this context may use only spin mutexes for locking and should be as small as possible because they run with absolute priority. .It Fn C_PREL Specifies relative event time precision as binary logarithm of time interval divided by acceptable time deviation: 1 -- 1/2, 2 -- 1/4, etc. Note that the larger of .Fa pr or this value is used as the length of the time window. Smaller values .Pq which result in larger time intervals allow the callout subsystem to aggregate more events in one timer interrupt. .It Dv C_PRECALC The .Fa sbt argument specifies the absolute time at which the callout should be run, and the .Fa pr argument specifies the requested precision, which will not be adjusted during the scheduling process. The .Fa sbt and .Fa pr values should be calculated by an earlier call to .Fn callout_when which uses the user-supplied .Fa sbt , .Fa pr , and .Fa flags values. .It Dv C_HARDCLOCK Align the timeouts to .Fn hardclock calls if possible. .El .Pp The .Fn callout_reset functions accept a .Fa func argument which identifies the function to be called when the time expires. It must be a pointer to a function that takes a single .Fa void * argument. Upon invocation, .Fa func will receive .Fa arg as its only argument. The .Fn callout_schedule functions reuse the .Fa func and .Fa arg arguments from the previous callout. Note that one of the .Fn callout_reset functions must always be called to initialize .Fa func and .Fa arg before one of the .Fn callout_schedule functions can be used. .Pp The callout subsystem provides a softclock thread for each CPU in the system. Callouts are assigned to a single CPU and are executed by the softclock thread for that CPU. Initially, callouts are assigned to CPU 0. The .Fn callout_reset_on , .Fn callout_reset_sbt_on , .Fn callout_schedule_on and .Fn callout_schedule_sbt_on functions assign the callout to CPU .Fa cpu . The .Fn callout_reset_curcpu , .Fn callout_reset_sbt_curcpu , .Fn callout_schedule_curcpu and .Fn callout_schedule_sbt_curcpu functions assign the callout to the current CPU. The .Fn callout_reset , .Fn callout_reset_sbt , .Fn callout_schedule and .Fn callout_schedule_sbt functions schedule the callout to execute in the softclock thread of the CPU to which it is currently assigned.
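.Pp
The following sketch is illustrative only; it reuses the hypothetical
.Vt struct mysc
softc and
.Fn mysc_tick
handler from the earlier example, and the 50 millisecond period and 5 millisecond precision are arbitrary.
It schedules a relative high-resolution timeout that the system may coalesce with other nearby timer interrupts:
.Bd -literal -offset indent
/*
 * Called with sc->sc_mtx held.  Fire roughly 50ms from now.
 * The time window is the larger of the pr argument (5ms) and
 * C_PREL(3), i.e. 1/8 of the 50ms interval.
 */
callout_reset_sbt(&sc->sc_callout, 50 * SBT_1MS, 5 * SBT_1MS,
    mysc_tick, sc, C_PREL(3));
.Ed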
.Pp Softclock threads are not pinned to their respective CPUs by default. The softclock thread for CPU 0 can be pinned to CPU 0 by setting the .Va kern.pin_default_swi loader tunable to a non-zero value. Softclock threads for CPUs other than zero can be pinned to their respective CPUs by setting the .Va kern.pin_pcpu_swi loader tunable to a non-zero value. .Pp The macros .Fn callout_pending , .Fn callout_active and .Fn callout_deactivate provide access to the current state of the callout. The .Fn callout_pending macro checks whether a callout is .Em pending ; a callout is considered .Em pending when a timeout has been set but the time has not yet arrived. Note that once the timeout time arrives and the callout subsystem starts to process this callout, .Fn callout_pending will return .Dv FALSE even though the callout function may not have finished .Pq or even begun executing. The .Fn callout_active macro checks whether a callout is marked as .Em active , and the .Fn callout_deactivate macro clears the callout's .Em active flag. The callout subsystem marks a callout as .Em active when a timeout is set and it clears the .Em active flag in .Fn callout_stop and .Fn callout_drain , but it .Em does not clear it when a callout expires normally via the execution of the callout function. .Pp The .Fn callout_when function may be used to pre-calculate the absolute time at which the timeout should be run and the precision of the scheduled run time according to the required time .Fa sbt , precision .Fa precision , and additional adjustments requested by the .Fa flags argument. Flags accepted by the .Fn callout_when function are the same as flags for the .Fn callout_reset function. The resulting time is assigned to the variable pointed to by the .Fa sbt_res argument, and the resulting precision is assigned to .Fa *precision_res . When passing the results to .Fn callout_reset , add the .Dv C_PRECALC flag to .Fa flags to avoid incorrect re-adjustment. The function is intended for situations where the precise time of the callout run should be known in advance, since trying to read this time from the callout structure itself after a .Fn callout_reset call is racy. .Ss "Avoiding Race Conditions" The callout subsystem invokes callout functions from its own thread context. Without some kind of synchronization, it is possible that a callout function will be invoked concurrently with an attempt to stop or reset the callout by another thread. In particular, since callout functions typically acquire a lock as their first action, the callout function may have already been invoked, but is blocked waiting for that lock at the time that another thread tries to reset or stop the callout. .Pp There are three main techniques for addressing these synchronization concerns. The first approach is preferred as it is the simplest: .Bl -enum -offset indent .It Callouts can be associated with a specific lock when they are initialized by .Fn callout_init_mtx , .Fn callout_init_rm , or .Fn callout_init_rw . When a callout is associated with a lock, the callout subsystem acquires the lock before the callout function is invoked. This allows the callout subsystem to transparently handle races between callout cancellation, scheduling, and execution. Note that the associated lock must be acquired before calling .Fn callout_stop or one of the .Fn callout_reset or .Fn callout_schedule functions to provide this safety.
.Pp A callout initialized via .Fn callout_init with .Fa mpsafe set to zero is implicitly associated with the .Va Giant mutex. If .Va Giant is held when cancelling or rescheduling the callout, then its use will prevent races with the callout function. .It The return value from .Fn callout_stop .Po or the .Fn callout_reset and .Fn callout_schedule function families .Pc indicates whether or not the callout was removed. If it is known that the callout was set and the callout function has not yet executed, then a return value of .Dv FALSE indicates that the callout function is about to be called. For example: .Bd -literal -offset indent if (sc->sc_flags & SCFLG_CALLOUT_RUNNING) { if (callout_stop(&sc->sc_callout)) { sc->sc_flags &= ~SCFLG_CALLOUT_RUNNING; /* successfully stopped */ } else { /* * callout has expired and callout * function is about to be executed */ } } .Ed .It The .Fn callout_pending , .Fn callout_active and .Fn callout_deactivate macros can be used together to work around the race conditions. When a callout's timeout is set, the callout subsystem marks the callout as both .Em active and .Em pending . When the timeout time arrives, the callout subsystem begins processing the callout by first clearing the .Em pending flag. It then invokes the callout function without changing the .Em active flag, and does not clear the .Em active flag even after the callout function returns. The mechanism described here requires the callout function itself to clear the .Em active flag using the .Fn callout_deactivate macro. The .Fn callout_stop and .Fn callout_drain functions always clear both the .Em active and .Em pending flags before returning. .Pp The callout function should first check the .Em pending flag and return without action if .Fn callout_pending returns .Dv TRUE . This indicates that the callout was rescheduled using .Fn callout_reset just before the callout function was invoked. If .Fn callout_active returns .Dv FALSE then the callout function should also return without action. This indicates that the callout has been stopped. Finally, the callout function should call .Fn callout_deactivate to clear the .Em active flag. For example: .Bd -literal -offset indent mtx_lock(&sc->sc_mtx); if (callout_pending(&sc->sc_callout)) { /* callout was reset */ mtx_unlock(&sc->sc_mtx); return; } if (!callout_active(&sc->sc_callout)) { /* callout was stopped */ mtx_unlock(&sc->sc_mtx); return; } callout_deactivate(&sc->sc_callout); /* rest of callout function */ .Ed .Pp Together with appropriate synchronization, such as the mutex used above, this approach permits the .Fn callout_stop and .Fn callout_reset functions to be used at any time without races. For example: .Bd -literal -offset indent mtx_lock(&sc->sc_mtx); callout_stop(&sc->sc_callout); /* The callout is effectively stopped now. */ .Ed .Pp If the callout is still pending then these functions operate normally, but if processing of the callout has already begun then the tests in the callout function cause it to return without further action. Synchronization between the callout function and other code ensures that stopping or resetting the callout will never be attempted while the callout function is past the .Fn callout_deactivate call. .Pp The above technique additionally ensures that the .Em active flag always reflects whether the callout is effectively enabled or disabled. 
If .Fn callout_active returns false, then the callout is effectively disabled, since even if the callout subsystem is actually just about to invoke the callout function, the callout function will return without action. .El .Pp There is one final race condition that must be considered when a callout is being stopped for the last time. In this case it may not be safe to let the callout function itself detect that the callout was stopped, since it may need to access data objects that have already been destroyed or recycled. To ensure that the callout is completely finished, a call to .Fn callout_drain should be used. In particular, a callout should always be drained prior to destroying its associated lock or releasing the storage for the callout structure. .Sh RETURN VALUES The .Fn callout_active macro returns the state of a callout's .Em active flag. .Pp The .Fn callout_pending macro returns the state of a callout's .Em pending flag. .Pp The .Fn callout_reset and .Fn callout_schedule function families return a value of one if the callout was pending before the new function invocation was scheduled. .Pp The .Fn callout_stop and .Fn callout_drain functions return a value of one if the callout was still pending when it was called, a zero if the callout could not be stopped and a negative one if it was either not running or has already completed. .Sh HISTORY .Fx initially used the long-standing .Bx linked list callout mechanism which offered O(n) insertion and removal running time but did not generate or require handles for untimeout operations. .Pp .Fx 3.0 introduced a new set of timeout and untimeout routines from .Nx based on the work of .An Adam M. Costello and .An George Varghese , published in a technical report entitled .%T "Redesigning the BSD Callout and Timer Facilities" and modified for inclusion in .Fx by .An Justin T. Gibbs . The original work on the data structures used in that implementation was published by .An G. Varghese and .An A. Lauck in the paper .%T "Hashed and Hierarchical Timing Wheels: Data Structures for the Efficient Implementation of a Timer Facility" in the .%B "Proceedings of the 11th ACM Annual Symposium on Operating Systems Principles" . .Pp .Fx 3.3 introduced the first implementations of .Fn callout_init , .Fn callout_reset , and .Fn callout_stop which permitted callers to allocate dedicated storage for callouts. This ensured that a callout would always fire, unlike .Fn timeout , which would silently fail if it was unable to allocate a callout. .Pp .Fx 5.0 permitted callout handlers to be tagged as MPSAFE via .Fn callout_init . .Pp .Fx 5.3 introduced .Fn callout_drain . .Pp .Fx 6.0 introduced .Fn callout_init_mtx . .Pp .Fx 8.0 introduced per-CPU callout wheels, .Fn callout_init_rw , and .Fn callout_schedule . .Pp .Fx 9.0 changed the underlying timer interrupts used to drive callouts to prefer one-shot event timers instead of a periodic timer interrupt. .Pp .Fx 10.0 switched the callout wheel to support tickless operation. These changes introduced .Vt sbintime_t and the .Fn callout_reset_sbt* family of functions. .Fx 10.0 also added .Dv C_DIRECT_EXEC and .Fn callout_init_rm . .Pp .Fx 10.2 introduced the .Fn callout_schedule_sbt* family of functions. .Pp .Fx 11.0 introduced .Fn callout_async_drain . .Fx 11.1 introduced .Fn callout_when . .Fx 13.0 removed .Vt timeout_t , .Fn timeout , and .Fn untimeout .
diff --git a/share/man/man9/taskqueue.9 b/share/man/man9/taskqueue.9 index 2ede90458629..58eb6b7c5571 100644 --- a/share/man/man9/taskqueue.9 +++ b/share/man/man9/taskqueue.9 @@ -1,523 +1,520 @@ .\" -*- nroff -*- .\" .\" Copyright (c) 2000 Doug Rabson .\" .\" All rights reserved. .\" .\" This program is free software. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd June 6, 2020 +.Dd September 1, 2021 .Dt TASKQUEUE 9 .Os .Sh NAME .Nm taskqueue .Nd asynchronous task execution .Sh SYNOPSIS .In sys/param.h .In sys/kernel.h .In sys/malloc.h .In sys/queue.h .In sys/taskqueue.h .Bd -literal typedef void (*task_fn_t)(void *context, int pending); typedef void (*taskqueue_enqueue_fn)(void *context); struct task { STAILQ_ENTRY(task) ta_link; /* link for queue */ u_short ta_pending; /* count times queued */ u_short ta_priority; /* priority of task in queue */ task_fn_t ta_func; /* task handler */ void *ta_context; /* argument for handler */ }; enum taskqueue_callback_type { TASKQUEUE_CALLBACK_TYPE_INIT, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN, }; typedef void (*taskqueue_callback_fn)(void *context); struct timeout_task; .Ed .Ft struct taskqueue * .Fn taskqueue_create "const char *name" "int mflags" "taskqueue_enqueue_fn enqueue" "void *context" .Ft struct taskqueue * .Fn taskqueue_create_fast "const char *name" "int mflags" "taskqueue_enqueue_fn enqueue" "void *context" .Ft int .Fn taskqueue_start_threads "struct taskqueue **tqp" "int count" "int pri" "const char *name" "..." .Ft int .Fo taskqueue_start_threads_cpuset .Fa "struct taskqueue **tqp" "int count" "int pri" "cpuset_t *mask" .Fa "const char *name" "..." .Fc .Ft int .Fo taskqueue_start_threads_in_proc .Fa "struct taskqueue **tqp" "int count" "int pri" "struct proc *proc" .Fa "const char *name" "..." 
.Fc .Ft void .Fn taskqueue_set_callback "struct taskqueue *queue" "enum taskqueue_callback_type cb_type" "taskqueue_callback_fn callback" "void *context" .Ft void .Fn taskqueue_free "struct taskqueue *queue" .Ft int .Fn taskqueue_enqueue "struct taskqueue *queue" "struct task *task" .Ft int .Fn taskqueue_enqueue_timeout "struct taskqueue *queue" "struct timeout_task *timeout_task" "int ticks" .Ft int .Fn taskqueue_enqueue_timeout_sbt "struct taskqueue *queue" "struct timeout_task *timeout_task" "sbintime_t sbt" "sbintime_t pr" "int flags" .Ft int .Fn taskqueue_cancel "struct taskqueue *queue" "struct task *task" "u_int *pendp" .Ft int .Fn taskqueue_cancel_timeout "struct taskqueue *queue" "struct timeout_task *timeout_task" "u_int *pendp" .Ft void .Fn taskqueue_drain "struct taskqueue *queue" "struct task *task" .Ft void .Fn taskqueue_drain_timeout "struct taskqueue *queue" "struct timeout_task *timeout_task" .Ft void .Fn taskqueue_drain_all "struct taskqueue *queue" .Ft void .Fn taskqueue_quiesce "struct taskqueue *queue" .Ft void .Fn taskqueue_block "struct taskqueue *queue" .Ft void .Fn taskqueue_unblock "struct taskqueue *queue" .Ft int .Fn taskqueue_member "struct taskqueue *queue" "struct thread *td" .Ft void .Fn taskqueue_run "struct taskqueue *queue" .Fn TASK_INIT "struct task *task" "int priority" "task_fn_t func" "void *context" .Fn TASK_INITIALIZER "int priority" "task_fn_t func" "void *context" .Fn TASKQUEUE_DECLARE "name" .Fn TASKQUEUE_DEFINE "name" "taskqueue_enqueue_fn enqueue" "void *context" "init" .Fn TASKQUEUE_FAST_DEFINE "name" "taskqueue_enqueue_fn enqueue" "void *context" "init" .Fn TASKQUEUE_DEFINE_THREAD "name" .Fn TASKQUEUE_FAST_DEFINE_THREAD "name" .Fn TIMEOUT_TASK_INIT "struct taskqueue *queue" "struct timeout_task *timeout_task" "int priority" "task_fn_t func" "void *context" .Sh DESCRIPTION These functions provide a simple interface for asynchronous execution of code. .Pp The function .Fn taskqueue_create is used to create new queues. The arguments to .Fn taskqueue_create include a name that should be unique, a set of .Xr malloc 9 flags that specify whether the call to .Fn malloc is allowed to sleep, a function that is called from .Fn taskqueue_enqueue when a task is added to the queue, and a pointer to the memory location where the identity of the thread that services the queue is recorded. .\" XXX The rest of the sentence gets lots in relation to the first part. The function called from .Fn taskqueue_enqueue must arrange for the queue to be processed (for instance by scheduling a software interrupt or waking a kernel thread). The memory location where the thread identity is recorded is used to signal the service thread(s) to terminate--when this value is set to zero and the thread is signaled it will terminate. If the queue is intended for use in fast interrupt handlers .Fn taskqueue_create_fast should be used in place of .Fn taskqueue_create . .Pp The function .Fn taskqueue_free should be used to free the memory used by the queue. Any tasks that are on the queue will be executed at this time after which the thread servicing the queue will be signaled that it should exit. .Pp Once a taskqueue has been created, its threads should be started using .Fn taskqueue_start_threads , .Fn taskqueue_start_threads_cpuset or .Fn taskqueue_start_threads_in_proc . .Fn taskqueue_start_threads_cpuset takes a .Va cpuset argument which will cause the threads which are started for the taskqueue to be restricted to run on the given CPUs. 
.Fn taskqueue_start_threads_in_proc takes a .Va proc argument which will cause the threads which are started for the taskqueue to be assigned to the given kernel process. Callbacks may optionally be registered using .Fn taskqueue_set_callback . Currently, callbacks may be registered for the following purposes: .Bl -tag -width TASKQUEUE_CALLBACK_TYPE_SHUTDOWN .It Dv TASKQUEUE_CALLBACK_TYPE_INIT This callback is called by every thread in the taskqueue, before it executes any tasks. This callback must be set before the taskqueue's threads are started. .It Dv TASKQUEUE_CALLBACK_TYPE_SHUTDOWN This callback is called by every thread in the taskqueue, after it executes its last task. This callback will always be called before the taskqueue structure is reclaimed. .El .Pp To add a task to the list of tasks queued on a taskqueue, call .Fn taskqueue_enqueue with pointers to the queue and task. If the task's .Va ta_pending field is non-zero, then it is simply incremented to reflect the number of times the task was enqueued, up to a cap of USHRT_MAX. Otherwise, the task is added to the list before the first task which has a lower .Va ta_priority value or at the end of the list if no tasks have a lower priority. Enqueueing a task does not perform any memory allocation which makes it suitable for calling from an interrupt handler. This function will return .Er EPIPE if the queue is being freed. .Pp When a task is executed, first it is removed from the queue, the value of .Va ta_pending is recorded and then the field is zeroed. The function .Va ta_func from the task structure is called with the value of the field .Va ta_context as its first argument and the value of .Va ta_pending as its second argument. After the function .Va ta_func returns, .Xr wakeup 9 is called on the task pointer passed to .Fn taskqueue_enqueue . .Pp The .Fn taskqueue_enqueue_timeout function is used to schedule the enqueue after the specified number of .Va ticks . The .Fn taskqueue_enqueue_timeout_sbt function provides finer control over the scheduling based on .Va sbt , .Va pr , and .Va flags , as detailed in .Xr callout 9 . -Only non-fast task queues can be used for -.Va timeout_task -scheduling. If the .Va ticks argument is negative, the already scheduled enqueueing is not re-scheduled. Otherwise, the task is scheduled for enqueueing in the future, after the absolute value of .Va ticks is passed. This function returns -1 if the task is being drained. Otherwise, the number of pending calls is returned. .Pp The .Fn taskqueue_cancel function is used to cancel a task. The .Va ta_pending count is cleared, and the old value returned in the reference parameter .Fa pendp , if it is .Pf non- Dv NULL . If the task is currently running, .Dv EBUSY is returned, otherwise 0. To implement a blocking .Fn taskqueue_cancel that waits for a running task to finish, it could look like: .Bd -literal -offset indent while (taskqueue_cancel(tq, task, NULL) != 0) taskqueue_drain(tq, task); .Ed .Pp Note that, as with .Fn taskqueue_drain , the caller is responsible for ensuring that the task is not re-enqueued after being canceled. .Pp Similarly, the .Fn taskqueue_cancel_timeout function is used to cancel the scheduled task execution. .Pp The .Fn taskqueue_drain function is used to wait for the task to finish, and the .Fn taskqueue_drain_timeout function is used to wait for the scheduled task to finish. There is no guarantee that the task will not be enqueued after call to .Fn taskqueue_drain . 
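.Pp
The following sketch is illustrative only; the
.Vt struct mysc
softc, its members, and the function names are hypothetical.
It creates a private taskqueue with one thread, defers work to it, and then cancels or drains the task before the queue is freed; note that, as the surrounding text explains, the caller must separately ensure that the task can no longer be enqueued once teardown has started:
.Bd -literal -offset indent
static void
mysc_task_fn(void *context, int pending)
{
	struct mysc *sc = context;

	/*
	 * "pending" is the number of times the task was queued
	 * since it last ran.  Do the deferred work here.
	 */
}

static void
mysc_attach(struct mysc *sc)
{
	sc->sc_tq = taskqueue_create("mysc_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "mysc taskq");
	TASK_INIT(&sc->sc_task, 0, mysc_task_fn, sc);
}

/* Typically called from an interrupt handler or filter. */
static void
mysc_defer_work(struct mysc *sc)
{
	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
}

static void
mysc_detach(struct mysc *sc)
{
	/* Wait for a running instance and discard pending ones. */
	while (taskqueue_cancel(sc->sc_tq, &sc->sc_task, NULL) != 0)
		taskqueue_drain(sc->sc_tq, &sc->sc_task);
	taskqueue_free(sc->sc_tq);
}
.Ed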
If the caller wants to put the task into a known state, then before calling .Fn taskqueue_drain the caller should use out-of-band means to ensure that the task would not be enqueued. For example, if the task is enqueued by an interrupt filter, then the interrupt could be disabled. .Pp The .Fn taskqueue_drain_all function is used to wait for all pending and running tasks that are enqueued on the taskqueue to finish. Tasks posted to the taskqueue after .Fn taskqueue_drain_all begins processing, including pending enqueues scheduled by a previous call to .Fn taskqueue_enqueue_timeout , do not extend the wait time of .Fn taskqueue_drain_all and may complete after .Fn taskqueue_drain_all returns. The .Fn taskqueue_quiesce function is used to wait for the queue to become empty and for all running tasks to finish. To avoid blocking indefinitely, the caller must ensure by some mechanism that tasks will eventually stop being posted to the queue. .Pp The .Fn taskqueue_block function blocks the taskqueue. It prevents any enqueued but not running tasks from being executed. Future calls to .Fn taskqueue_enqueue will enqueue tasks, but the tasks will not be run until .Fn taskqueue_unblock is called. Please note that .Fn taskqueue_block does not wait for any currently running tasks to finish. Thus, the .Fn taskqueue_block does not provide a guarantee that .Fn taskqueue_run is not running after .Fn taskqueue_block returns, but it does provide a guarantee that .Fn taskqueue_run will not be called again until .Fn taskqueue_unblock is called. If the caller requires a guarantee that .Fn taskqueue_run is not running, then this must be arranged by the caller. Note that if .Fn taskqueue_drain is called on a task that is enqueued on a taskqueue that is blocked by .Fn taskqueue_block , then .Fn taskqueue_drain can not return until the taskqueue is unblocked. This can result in a deadlock if the thread blocked in .Fn taskqueue_drain is the thread that is supposed to call .Fn taskqueue_unblock . Thus, use of .Fn taskqueue_drain after .Fn taskqueue_block is discouraged, because the state of the task can not be known in advance. The same caveat applies to .Fn taskqueue_drain_all . .Pp The .Fn taskqueue_unblock function unblocks the previously blocked taskqueue. All enqueued tasks can be run after this call. .Pp The .Fn taskqueue_member function returns .No 1 if the given thread .Fa td is part of the given taskqueue .Fa queue and .No 0 otherwise. .Pp The .Fn taskqueue_run function will run all pending tasks in the specified .Fa queue . Normally this function is only used internally. .Pp A convenience macro, .Fn TASK_INIT "task" "priority" "func" "context" is provided to initialise a .Va task structure. The .Fn TASK_INITIALIZER macro generates an initializer for a task structure. A macro .Fn TIMEOUT_TASK_INIT "queue" "timeout_task" "priority" "func" "context" initializes the .Va timeout_task structure. The values of .Va priority , .Va func , and .Va context are simply copied into the task structure fields and the .Va ta_pending field is cleared. .Pp Five macros .Fn TASKQUEUE_DECLARE "name" , .Fn TASKQUEUE_DEFINE "name" "enqueue" "context" "init" , .Fn TASKQUEUE_FAST_DEFINE "name" "enqueue" "context" "init" , and .Fn TASKQUEUE_DEFINE_THREAD "name" .Fn TASKQUEUE_FAST_DEFINE_THREAD "name" are used to declare a reference to a global queue, to define the implementation of the queue, and declare a queue that uses its own thread. 
The .Fn TASKQUEUE_DEFINE macro arranges to call .Fn taskqueue_create with the values of its .Va name , .Va enqueue and .Va context arguments during system initialisation. After calling .Fn taskqueue_create , the .Va init argument to the macro is executed as a C statement, allowing any further initialisation to be performed (such as registering an interrupt handler, etc.). .Pp The .Fn TASKQUEUE_DEFINE_THREAD macro defines a new taskqueue with its own kernel thread to serve tasks. The variable .Vt struct taskqueue *taskqueue_name is used to enqueue tasks onto the queue. .Pp .Fn TASKQUEUE_FAST_DEFINE and .Fn TASKQUEUE_FAST_DEFINE_THREAD act just like .Fn TASKQUEUE_DEFINE and .Fn TASKQUEUE_DEFINE_THREAD respectively but taskqueue is created with .Fn taskqueue_create_fast . .Ss Predefined Task Queues The system provides four global taskqueues, .Va taskqueue_fast , .Va taskqueue_swi , .Va taskqueue_swi_giant , and .Va taskqueue_thread . The .Va taskqueue_fast queue is for swi handlers dispatched from fast interrupt handlers, where sleep mutexes cannot be used. The swi taskqueues are run via a software interrupt mechanism. The .Va taskqueue_swi queue runs without the protection of the .Va Giant kernel lock, and the .Va taskqueue_swi_giant queue runs with the protection of the .Va Giant kernel lock. The thread taskqueue .Va taskqueue_thread runs in a kernel thread context, and tasks run from this thread do not run under the .Va Giant kernel lock. If the caller wants to run under .Va Giant , he should explicitly acquire and release .Va Giant in his taskqueue handler routine. .Pp To use these queues, call .Fn taskqueue_enqueue with the value of the global taskqueue variable for the queue you wish to use. .Pp The software interrupt queues can be used, for instance, for implementing interrupt handlers which must perform a significant amount of processing in the handler. The hardware interrupt handler would perform minimal processing of the interrupt and then enqueue a task to finish the work. This reduces to a minimum the amount of time spent with interrupts disabled. .Pp The thread queue can be used, for instance, by interrupt level routines that need to call kernel functions that do things that can only be done from a thread context. (e.g., call malloc with the M_WAITOK flag.) .Pp Note that tasks queued on shared taskqueues such as .Va taskqueue_swi may be delayed an indeterminate amount of time before execution. If queueing delays cannot be tolerated then a private taskqueue should be created with a dedicated processing thread. .Sh SEE ALSO .Xr callout 9 , .Xr ithread 9 , .Xr kthread 9 , .Xr swi 9 .Sh HISTORY This interface first appeared in .Fx 5.0 . There is a similar facility called work_queue in the Linux kernel. .Sh AUTHORS This manual page was written by .An Doug Rabson . diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index a85420e62c7a..6343ad445623 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -1,1351 +1,1355 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ */ /* * Machine independent bits of mutex implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_adaptive_mutexes.h" #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES) #define ADAPTIVE_MUTEXES #endif #ifdef HWPMC_HOOKS #include PMC_SOFT_DEFINE( , , lock, failed); #endif /* * Return the mutex address when the lock cookie address is provided. * This functionality assumes that struct mtx* have a member named mtx_lock. */ #define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock)) /* * Internal utility macros. */ #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED) static void assert_mtx(const struct lock_object *lock, int what); #ifdef DDB static void db_show_mtx(const struct lock_object *lock); #endif static void lock_mtx(struct lock_object *lock, uintptr_t how); static void lock_spin(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_mtx(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_mtx(struct lock_object *lock); static uintptr_t unlock_spin(struct lock_object *lock); /* * Lock classes for sleep and spin mutexes. 
*/ struct lock_class lock_class_mtx_sleep = { .lc_name = "sleep mutex", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_mtx, .lc_unlock = unlock_mtx, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; struct lock_class lock_class_mtx_spin = { .lc_name = "spin mutex", .lc_flags = LC_SPINLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_spin, .lc_unlock = unlock_spin, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; #ifdef ADAPTIVE_MUTEXES #ifdef MUTEX_CUSTOM_BACKOFF static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "mtx debugging"); static struct lock_delay_config __read_frequently mtx_delay; SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base, 0, ""); SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max, 0, ""); LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay); #else #define mtx_delay locks_delay #endif #endif #ifdef MUTEX_SPIN_CUSTOM_BACKOFF static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "mtx spin debugging"); static struct lock_delay_config __read_frequently mtx_spin_delay; SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_spin_delay.base, 0, ""); SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_spin_delay.max, 0, ""); LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay); #else #define mtx_spin_delay locks_delay #endif /* * System-wide mutexes */ struct mtx blocked_lock; struct mtx __exclusive_cache_line Giant; static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *); void assert_mtx(const struct lock_object *lock, int what) { /* * Treat LA_LOCKED as if LA_XLOCKED was asserted. * * Some callers of lc_assert uses LA_LOCKED to indicate that either * a shared lock or write lock was held, while other callers uses * the more strict LA_XLOCKED (used as MA_OWNED). * * Mutex is the only lock class that can not be shared, as a result, * we can reasonably consider the caller really intends to assert * LA_XLOCKED when they are asserting LA_LOCKED on a mutex object. */ if (what & LA_LOCKED) { what &= ~LA_LOCKED; what |= LA_XLOCKED; } mtx_assert((const struct mtx *)lock, what); } void lock_mtx(struct lock_object *lock, uintptr_t how) { mtx_lock((struct mtx *)lock); } void lock_spin(struct lock_object *lock, uintptr_t how) { - panic("spin locks can only use msleep_spin"); + mtx_lock_spin((struct mtx *)lock); } uintptr_t unlock_mtx(struct lock_object *lock) { struct mtx *m; m = (struct mtx *)lock; mtx_assert(m, MA_OWNED | MA_NOTRECURSED); mtx_unlock(m); return (0); } uintptr_t unlock_spin(struct lock_object *lock) { + struct mtx *m; - panic("spin locks can only use msleep_spin"); + m = (struct mtx *)lock; + mtx_assert(m, MA_OWNED | MA_NOTRECURSED); + mtx_unlock_spin(m); + return (0); } #ifdef KDTRACE_HOOKS int owner_mtx(const struct lock_object *lock, struct thread **owner) { const struct mtx *m; uintptr_t x; m = (const struct mtx *)lock; x = m->mtx_lock; *owner = (struct thread *)(x & ~MTX_FLAGMASK); return (*owner != NULL); } #endif /* * Function versions of the inlined __mtx_* macros. These are used by * modules and can also be called from assembly language if needed. 
*/ void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid, v; m = mtxlock2mtx(c); KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || !TD_IS_IDLETHREAD(curthread), ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); tid = (uintptr_t)curthread; v = MTX_UNOWNED; if (!_mtx_obtain_lock_fetch(m, &v, tid)) _mtx_lock_sleep(m, v, opts, file, line); else LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, 0, 0, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); #ifdef LOCK_PROFILING __mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line); #else __mtx_unlock(m, curthread, opts, file, line); #endif TD_LOCKS_DEC(curthread); } void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; #ifdef SMP uintptr_t tid, v; #endif m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_lock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0, ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); opts &= ~MTX_RECURSE; WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); #ifdef SMP spinlock_enter(); tid = (uintptr_t)curthread; v = MTX_UNOWNED; if (!_mtx_obtain_lock_fetch(m, &v, tid)) _mtx_lock_spin(m, v, opts, file, line); else LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, 0, 0, file, line); #else __mtx_lock_spin(m, curthread, opts, file, line); #endif LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); } int __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return (1); m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_trylock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); KASSERT((opts & MTX_RECURSE) == 0, ("mtx_trylock_spin: unsupp. 
opt MTX_RECURSE on mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); if (__mtx_trylock_spin(m, curthread, opts, file, line)) { LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); return (1); } LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line); return (0); } void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_unlock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); __mtx_unlock_spin(m); } /* * The important part of mtx_trylock{,_flags}() * Tries to acquire lock `m.' If this function is called on a mutex that * is already owned, it will recursively acquire the lock. */ int _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF) { struct thread *td; uintptr_t tid, v; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int rval; bool recursed; td = curthread; tid = (uintptr_t)td; if (SCHEDULER_STOPPED_TD(td)) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td), ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); rval = 1; recursed = false; v = MTX_UNOWNED; for (;;) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; if (v == MTX_UNOWNED) continue; if (v == tid && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0)) { m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); recursed = true; break; } rval = 0; break; } opts &= ~MTX_RECURSE; LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line); if (rval) { WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); TD_LOCKS_INC(curthread); if (!recursed) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); } return (rval); } int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; m = mtxlock2mtx(c); return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG)); } /* * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. * * We call this if the lock is either contested (i.e. we need to go to * sleep waiting for it), or if we need to recurse on it. 
*/ #if LOCK_DEBUG > 0 void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file, int line) #else void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v) #endif { struct thread *td; struct mtx *m; struct turnstile *ts; uintptr_t tid; struct thread *owner; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) int doing_lockprof = 0; #endif td = curthread; tid = (uintptr_t)td; m = mtxlock2mtx(c); #ifdef KDTRACE_HOOKS if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) { while (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) goto out_lockstat; } doing_lockprof = 1; all_time -= lockstat_nsecs(&m->lock_object); } #endif #ifdef LOCK_PROFILING doing_lockprof = 1; #endif if (SCHEDULER_STOPPED_TD(td)) return; if (__predict_false(v == MTX_UNOWNED)) v = MTX_READ_VALUE(m); if (__predict_false(lv_mtx_owner(v) == td)) { KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0, ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); #if LOCK_DEBUG > 0 opts &= ~MTX_RECURSE; #endif m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); return; } #if LOCK_DEBUG > 0 opts &= ~MTX_RECURSE; #endif #if defined(ADAPTIVE_MUTEXES) lock_delay_arg_init(&lda, &mtx_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init_noadapt(&lda); #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, false, &contested, &waittime); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR4(KTR_LOCK, "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", m->lock_object.lo_name, (void *)m->mtx_lock, file, line); for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef ADAPTIVE_MUTEXES /* * If the owner is running on another CPU, spin until the * owner stops running or the state of the lock changes. */ owner = lv_mtx_owner(v); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&m->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, m, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "spinning", "lockname:\"%s\"", m->lock_object.lo_name); do { lock_delay(&lda); v = MTX_READ_VALUE(m); owner = lv_mtx_owner(v); } while (v != MTX_UNOWNED && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); continue; } #endif ts = turnstile_trywait(&m->lock_object); v = MTX_READ_VALUE(m); retry_turnstile: /* * Check if the lock has been released while spinning for * the turnstile chain lock. */ if (v == MTX_UNOWNED) { turnstile_cancel(ts); continue; } #ifdef ADAPTIVE_MUTEXES /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ owner = lv_mtx_owner(v); if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); continue; } #endif /* * If the mutex isn't already contested and a failure occurs * setting the contested bit, the mutex was either released * or the state of the MTX_RECURSED bit changed. 
*/ if ((v & MTX_CONTESTED) == 0 && !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) { goto retry_turnstile; } /* * We definitely must sleep for this lock. */ mtx_assert(m, MA_NOTOWNED); /* * Block on the turnstile. */ #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&m->lock_object); #endif #ifndef ADAPTIVE_MUTEXES owner = mtx_owner(m); #endif MPASS(owner == mtx_owner(m)); turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&m->lock_object); sleep_cnt++; #endif v = MTX_READ_VALUE(m); } #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return; #endif #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&m->lock_object); if (sleep_time) LOCKSTAT_RECORD1(adaptive__block, m, sleep_time); /* * Only record the loops spinning and not sleeping. */ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time); out_lockstat: #endif LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); } #ifdef SMP /* * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock. * * This is only called if we need to actually spin for the lock. Recursion * is handled inline. */ #if LOCK_DEBUG > 0 void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts, const char *file, int line) #else void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v) #endif { struct mtx *m; struct lock_delay_arg lda; uintptr_t tid; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) int doing_lockprof = 0; #endif tid = (uintptr_t)curthread; m = mtxlock2mtx(c); #ifdef KDTRACE_HOOKS if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) { while (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) goto out_lockstat; } doing_lockprof = 1; spin_time -= lockstat_nsecs(&m->lock_object); } #endif #ifdef LOCK_PROFILING doing_lockprof = 1; #endif if (__predict_false(v == MTX_UNOWNED)) v = MTX_READ_VALUE(m); if (__predict_false(v == tid)) { m->mtx_recurse++; return; } if (SCHEDULER_STOPPED()) return; if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "spinning", "lockname:\"%s\"", m->lock_object.lo_name); lock_delay_arg_init(&lda, &mtx_spin_delay); #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime); for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } /* Give interrupts a chance while we spin. 
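 * Spin mutexes are normally held with interrupts disabled (see
 * spinlock_enter()), so we briefly leave the spinlock section while
 * busy-waiting to keep interrupt latency bounded; lock_delay() provides
 * backoff and _mtx_lock_indefinite_check() complains, and eventually
 * panics, if the owner appears stuck.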
*/ spinlock_exit(); do { if (__predict_true(lda.spin_cnt < 10000000)) { lock_delay(&lda); } else { _mtx_lock_indefinite_check(m, &lda); } v = MTX_READ_VALUE(m); } while (v != MTX_UNOWNED); spinlock_enter(); } if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return; #endif #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); if (lda.spin_cnt != 0) LOCKSTAT_RECORD1(spin__spin, m, spin_time); out_lockstat: #endif LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); } #endif /* SMP */ #ifdef INVARIANTS static void thread_lock_validate(struct mtx *m, int opts, const char *file, int line) { KASSERT(m->mtx_lock != MTX_DESTROYED, ("thread_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("thread_lock() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0, ("thread_lock: got a recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); } #else #define thread_lock_validate(m, opts, file, line) do { } while (0) #endif #ifndef LOCK_PROFILING #if LOCK_DEBUG > 0 void _thread_lock(struct thread *td, int opts, const char *file, int line) #else void _thread_lock(struct thread *td) #endif { struct mtx *m; uintptr_t tid; tid = (uintptr_t)curthread; if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire))) goto slowpath_noirq; spinlock_enter(); m = td->td_lock; thread_lock_validate(m, 0, file, line); if (__predict_false(m == &blocked_lock)) goto slowpath_unlocked; if (__predict_false(!_mtx_obtain_lock(m, tid))) goto slowpath_unlocked; if (__predict_true(m == td->td_lock)) { WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line); return; } _mtx_release_lock_quick(m); slowpath_unlocked: spinlock_exit(); slowpath_noirq: #if LOCK_DEBUG > 0 thread_lock_flags_(td, opts, file, line); #else thread_lock_flags_(td, 0, 0, 0); #endif } #endif void thread_lock_flags_(struct thread *td, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid, v; struct lock_delay_arg lda; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) int doing_lockprof = 1; #endif tid = (uintptr_t)curthread; if (SCHEDULER_STOPPED()) { /* * Ensure that spinlock sections are balanced even when the * scheduler is stopped, since we may otherwise inadvertently * re-enable interrupts while dumping core. */ spinlock_enter(); return; } lock_delay_arg_init(&lda, &mtx_spin_delay); #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif #ifdef LOCK_PROFILING doing_lockprof = 1; #elif defined(KDTRACE_HOOKS) doing_lockprof = lockstat_enabled; #endif #ifdef KDTRACE_HOOKS if (__predict_false(doing_lockprof)) spin_time -= lockstat_nsecs(&td->td_lock->lock_object); #endif spinlock_enter(); for (;;) { retry: m = td->td_lock; thread_lock_validate(m, opts, file, line); v = MTX_READ_VALUE(m); for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } MPASS(v != tid); lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime); /* Give interrupts a chance while we spin. 
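 * Unlike an ordinary spin mutex, td->td_lock is only a pointer and may be
 * retargeted by the scheduler (see thread_lock_block()/thread_lock_set()
 * below) while we wait, so if it changes we re-enter the spinlock section
 * and restart the outer loop against the new lock.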
*/ spinlock_exit(); do { if (__predict_true(lda.spin_cnt < 10000000)) { lock_delay(&lda); } else { _mtx_lock_indefinite_check(m, &lda); } if (m != td->td_lock) { spinlock_enter(); goto retry; } v = MTX_READ_VALUE(m); } while (v != MTX_UNOWNED); spinlock_enter(); } if (m == td->td_lock) break; _mtx_release_lock_quick(m); } LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return; #endif #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); #endif LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); #ifdef KDTRACE_HOOKS if (lda.spin_cnt != 0) LOCKSTAT_RECORD1(thread__spin, m, spin_time); #endif } struct mtx * thread_lock_block(struct thread *td) { struct mtx *lock; lock = td->td_lock; mtx_assert(lock, MA_OWNED); td->td_lock = &blocked_lock; return (lock); } void thread_lock_unblock(struct thread *td, struct mtx *new) { mtx_assert(new, MA_OWNED); KASSERT(td->td_lock == &blocked_lock, ("thread %p lock %p not blocked_lock %p", td, td->td_lock, &blocked_lock)); atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new); } void thread_lock_block_wait(struct thread *td) { while (td->td_lock == &blocked_lock) cpu_spinwait(); /* Acquire fence to be certain that all thread state is visible. */ atomic_thread_fence_acq(); } void thread_lock_set(struct thread *td, struct mtx *new) { struct mtx *lock; mtx_assert(new, MA_OWNED); lock = td->td_lock; mtx_assert(lock, MA_OWNED); td->td_lock = new; mtx_unlock_spin(lock); } /* * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. * * We are only called here if the lock is recursed, contested (i.e. we * need to wake up a blocked thread) or lockstat probe is active. */ #if LOCK_DEBUG > 0 void __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file, int line) #else void __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v) #endif { struct mtx *m; struct turnstile *ts; uintptr_t tid; if (SCHEDULER_STOPPED()) return; tid = (uintptr_t)curthread; m = mtxlock2mtx(c); if (__predict_false(v == tid)) v = MTX_READ_VALUE(m); if (__predict_false(v & MTX_RECURSED)) { if (--(m->mtx_recurse) == 0) atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); return; } LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m); if (v == tid && _mtx_release_lock(m, tid)) return; /* * We have to lock the chain before the turnstile so this turnstile * can be removed from the hash list if it is empty. */ turnstile_chain_lock(&m->lock_object); _mtx_release_lock_quick(m); ts = turnstile_lookup(&m->lock_object); MPASS(ts != NULL); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE); /* * This turnstile is now no longer associated with the mutex. We can * unlock the chain lock so a new turnstile may take it's place. */ turnstile_unpend(ts); turnstile_chain_unlock(&m->lock_object); } /* * All the unlocking of MTX_SPIN locks is done inline. * See the __mtx_unlock_spin() macro for the details. 
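 * Roughly, that macro drops any recursion count, stores MTX_UNOWNED into
 * the lock word with release semantics and calls spinlock_exit(); no
 * waiter wakeup is needed because contenders on a spin mutex only
 * busy-wait.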
*/ /* * The backing function for the INVARIANTS-enabled mtx_assert() */ #ifdef INVARIANT_SUPPORT void __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line) { const struct mtx *m; if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); switch (what) { case MA_OWNED: case MA_OWNED | MA_RECURSED: case MA_OWNED | MA_NOTRECURSED: if (!mtx_owned(m)) panic("mutex %s not owned at %s:%d", m->lock_object.lo_name, file, line); if (mtx_recursed(m)) { if ((what & MA_NOTRECURSED) != 0) panic("mutex %s recursed at %s:%d", m->lock_object.lo_name, file, line); } else if ((what & MA_RECURSED) != 0) { panic("mutex %s unrecursed at %s:%d", m->lock_object.lo_name, file, line); } break; case MA_NOTOWNED: if (mtx_owned(m)) panic("mutex %s owned at %s:%d", m->lock_object.lo_name, file, line); break; default: panic("unknown mtx_assert at %s:%d", file, line); } } #endif /* * General init routine used by the MTX_SYSINIT() macro. */ void mtx_sysinit(void *arg) { struct mtx_args *margs = arg; mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts); } /* * Mutex initialization routine; initialize lock `m' of type contained in * `opts' with options contained in `opts' and name `name.' The optional * lock type `type' is used as a general lock category name for use with * witness. */ void _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts) { struct mtx *m; struct lock_class *class; int flags; m = mtxlock2mtx(c); MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0); ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p", __func__, name, &m->mtx_lock)); /* Determine lock class and lock flags. */ if (opts & MTX_SPIN) class = &lock_class_mtx_spin; else class = &lock_class_mtx_sleep; flags = 0; if (opts & MTX_QUIET) flags |= LO_QUIET; if (opts & MTX_RECURSE) flags |= LO_RECURSABLE; if ((opts & MTX_NOWITNESS) == 0) flags |= LO_WITNESS; if (opts & MTX_DUPOK) flags |= LO_DUPOK; if (opts & MTX_NOPROFILE) flags |= LO_NOPROFILE; if (opts & MTX_NEW) flags |= LO_NEW; /* Initialize mutex. */ lock_init(&m->lock_object, class, name, type, flags); m->mtx_lock = MTX_UNOWNED; m->mtx_recurse = 0; } /* * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be * passed in as a flag here because if the corresponding mtx_init() was * called with MTX_QUIET set, then it will already be set in the mutex's * flags. */ void _mtx_destroy(volatile uintptr_t *c) { struct mtx *m; m = mtxlock2mtx(c); if (!mtx_owned(m)) MPASS(mtx_unowned(m)); else { MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0); /* Perform the non-mtx related part of mtx_unlock_spin(). */ if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) { lock_profile_release_lock(&m->lock_object, true); spinlock_exit(); } else { TD_LOCKS_DEC(curthread); lock_profile_release_lock(&m->lock_object, false); } /* Tell witness this isn't locked to make it happy. */ WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__, __LINE__); } m->mtx_lock = MTX_DESTROYED; lock_destroy(&m->lock_object); } /* * Intialize the mutex code and system mutexes. This is called from the MD * startup code prior to mi_startup(). The per-CPU data space needs to be * setup before this is called. */ void mutex_init(void) { /* Setup turnstiles so that sleep mutexes work. */ init_turnstiles(); /* * Initialize mutexes. 
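 *
 * Giant is created recursable for the benefit of legacy code, and
 * blocked_lock is a sentinel spin mutex whose lock word is immediately
 * poisoned so that it always appears owned; threads parked on it by
 * thread_lock_block() simply spin until their td_lock is retargeted.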
*/ mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE); mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN); blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked. */ mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN); mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN); mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN); mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN); mtx_init(&devmtx, "cdev", NULL, MTX_DEF); mtx_lock(&Giant); } static void __noinline _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap) { struct thread *td; ldap->spin_cnt++; if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED()) cpu_lock_delay(); else { td = mtx_owner(m); /* If the mutex is unlocked, try again. */ if (td == NULL) return; printf( "spin lock %p (%s) held by %p (tid %d) too long\n", m, m->lock_object.lo_name, td, td->td_tid); #ifdef WITNESS witness_display_spinlock(&m->lock_object, td, printf); #endif panic("spin lock held too long"); } cpu_spinwait(); } void mtx_spin_wait_unlocked(struct mtx *m) { struct lock_delay_arg lda; KASSERT(m->mtx_lock != MTX_DESTROYED, ("%s() of destroyed mutex %p", __func__, m)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("%s() of sleep mutex %p (%s)", __func__, m, m->lock_object.lo_name)); KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m, m->lock_object.lo_name)); lda.spin_cnt = 0; while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) { if (__predict_true(lda.spin_cnt < 10000000)) { cpu_spinwait(); lda.spin_cnt++; } else { _mtx_lock_indefinite_check(m, &lda); } } } void mtx_wait_unlocked(struct mtx *m) { struct thread *owner; uintptr_t v; KASSERT(m->mtx_lock != MTX_DESTROYED, ("%s() of destroyed mutex %p", __func__, m)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("%s() not a sleep mutex %p (%s)", __func__, m, m->lock_object.lo_name)); KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m, m->lock_object.lo_name)); for (;;) { v = atomic_load_acq_ptr(&m->mtx_lock); if (v == MTX_UNOWNED) { break; } owner = lv_mtx_owner(v); if (!TD_IS_RUNNING(owner)) { mtx_lock(m); mtx_unlock(m); break; } cpu_spinwait(); } } #ifdef DDB void db_show_mtx(const struct lock_object *lock) { struct thread *td; const struct mtx *m; m = (const struct mtx *)lock; db_printf(" flags: {"); if (LOCK_CLASS(lock) == &lock_class_mtx_spin) db_printf("SPIN"); else db_printf("DEF"); if (m->lock_object.lo_flags & LO_RECURSABLE) db_printf(", RECURSE"); if (m->lock_object.lo_flags & LO_DUPOK) db_printf(", DUPOK"); db_printf("}\n"); db_printf(" state: {"); if (mtx_unowned(m)) db_printf("UNOWNED"); else if (mtx_destroyed(m)) db_printf("DESTROYED"); else { db_printf("OWNED"); if (m->mtx_lock & MTX_CONTESTED) db_printf(", CONTESTED"); if (m->mtx_lock & MTX_RECURSED) db_printf(", RECURSED"); } db_printf("}\n"); if (!mtx_unowned(m) && !mtx_destroyed(m)) { td = mtx_owner(m); db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (mtx_recursed(m)) db_printf(" recursed: %d\n", m->mtx_recurse); } } #endif diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 7bf5193fb7b1..89b937315907 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -1,684 +1,686 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1990, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. 
* All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #include #endif #ifdef EPOCH_TRACE #include #endif #include static void synch_setup(void *dummy); SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup, NULL); int hogticks; static const char pause_wchan[MAXCPU]; static struct callout loadav_callout; struct loadavg averunnable = { {0, 0, 0}, FSCALE }; /* load average, of runnable procs */ /* * Constants for averages over 1, 5, and 15 minutes * when sampling at 5 second intervals. */ static fixpt_t cexp[3] = { 0.9200444146293232 * FSCALE, /* exp(-1/12) */ 0.9834714538216174 * FSCALE, /* exp(-1/60) */ 0.9944598480048967 * FSCALE, /* exp(-1/180) */ }; /* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */ SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, FSCALE, "Fixed-point scale factor used for calculating load average values"); static void loadav(void *arg); SDT_PROVIDER_DECLARE(sched); SDT_PROBE_DEFINE(sched, , , preempt); static void sleepinit(void *unused) { hogticks = (hz / 10) * 2; /* Default only. */ init_sleepqueues(); } /* * vmem tries to lock the sleepq mutexes when free'ing kva, so make sure * it is available. */ SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, NULL); /* * General sleep call. Suspends the current thread until a wakeup is * performed on the specified identifier. The thread will then be made * runnable with the specified priority. Sleeps at most sbt units of time * (0 means no timeout). 
If pri includes the PCATCH flag, let signals * interrupt the sleep, otherwise ignore them while sleeping. Returns 0 if * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a * signal becomes pending, ERESTART is returned if the current system * call should be restarted if possible, and EINTR is returned if the system * call should be interrupted by the signal (return EINTR). * * The lock argument is unlocked before the caller is suspended, and * re-locked before _sleep() returns. If priority includes the PDROP * flag the lock is not re-locked before returning. */ int _sleep(const void *ident, struct lock_object *lock, int priority, const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags) { struct thread *td; struct lock_class *class; uintptr_t lock_state; int catch, pri, rval, sleepq_flags; WITNESS_SAVE_DECL(lock_witness); td = curthread; #ifdef KTRACE if (KTRPOINT(td, KTR_CSW)) ktrcsw(1, 0, wmesg); #endif WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock, "Sleeping on \"%s\"", wmesg); KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL || (priority & PNOLOCK) != 0, ("sleeping without a lock")); KASSERT(ident != NULL, ("_sleep: NULL ident")); KASSERT(TD_IS_RUNNING(td), ("_sleep: curthread not running")); if (priority & PDROP) KASSERT(lock != NULL && lock != &Giant.lock_object, ("PDROP requires a non-Giant lock")); if (lock != NULL) class = LOCK_CLASS(lock); else class = NULL; if (SCHEDULER_STOPPED_TD(td)) { if (lock != NULL && priority & PDROP) class->lc_unlock(lock); return (0); } catch = priority & PCATCH; pri = priority & PRIMASK; KASSERT(!TD_ON_SLEEPQ(td), ("recursive sleep")); if ((uintptr_t)ident >= (uintptr_t)&pause_wchan[0] && (uintptr_t)ident <= (uintptr_t)&pause_wchan[MAXCPU - 1]) sleepq_flags = SLEEPQ_PAUSE; else sleepq_flags = SLEEPQ_SLEEP; if (catch) sleepq_flags |= SLEEPQ_INTERRUPTIBLE; sleepq_lock(ident); CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)", td->td_tid, td->td_proc->p_pid, td->td_name, wmesg, ident); if (lock == &Giant.lock_object) mtx_assert(&Giant, MA_OWNED); DROP_GIANT(); if (lock != NULL && lock != &Giant.lock_object && !(class->lc_flags & LC_SLEEPABLE)) { + KASSERT(!(class->lc_flags & LC_SPINLOCK), + ("spin locks can only use msleep_spin")); WITNESS_SAVE(lock, lock_witness); lock_state = class->lc_unlock(lock); } else /* GCC needs to follow the Yellow Brick Road */ lock_state = -1; /* * We put ourselves on the sleep queue and start our timeout * before calling thread_suspend_check, as we could stop there, * and a wakeup or a SIGCONT (or both) could occur while we were * stopped without resuming us. Thus, we must be ready for sleep * when cursig() is called. If the wakeup happens while we're * stopped, then td will no longer be on a sleep queue upon * return from cursig(). 
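 * Both sleepq_add() and sleepq_set_timeout_sbt() below run while the
 * sleep queue chain lock taken by sleepq_lock() above is still held, so a
 * concurrent wakeup() on the same ident cannot slip in between them.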
*/ sleepq_add(ident, lock, wmesg, sleepq_flags, 0); if (sbt != 0) sleepq_set_timeout_sbt(ident, sbt, pr, flags); if (lock != NULL && class->lc_flags & LC_SLEEPABLE) { sleepq_release(ident); WITNESS_SAVE(lock, lock_witness); lock_state = class->lc_unlock(lock); sleepq_lock(ident); } if (sbt != 0 && catch) rval = sleepq_timedwait_sig(ident, pri); else if (sbt != 0) rval = sleepq_timedwait(ident, pri); else if (catch) rval = sleepq_wait_sig(ident, pri); else { sleepq_wait(ident, pri); rval = 0; } #ifdef KTRACE if (KTRPOINT(td, KTR_CSW)) ktrcsw(0, 0, wmesg); #endif PICKUP_GIANT(); if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) { class->lc_lock(lock, lock_state); WITNESS_RESTORE(lock, lock_witness); } return (rval); } int msleep_spin_sbt(const void *ident, struct mtx *mtx, const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags) { struct thread *td; int rval; WITNESS_SAVE_DECL(mtx); td = curthread; KASSERT(mtx != NULL, ("sleeping without a mutex")); KASSERT(ident != NULL, ("msleep_spin_sbt: NULL ident")); KASSERT(TD_IS_RUNNING(td), ("msleep_spin_sbt: curthread not running")); if (SCHEDULER_STOPPED_TD(td)) return (0); sleepq_lock(ident); CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)", td->td_tid, td->td_proc->p_pid, td->td_name, wmesg, ident); DROP_GIANT(); mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED); WITNESS_SAVE(&mtx->lock_object, mtx); mtx_unlock_spin(mtx); /* * We put ourselves on the sleep queue and start our timeout. */ sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0); if (sbt != 0) sleepq_set_timeout_sbt(ident, sbt, pr, flags); /* * Can't call ktrace with any spin locks held so it can lock the * ktrace_mtx lock, and WITNESS_WARN considers it an error to hold * any spin lock. Thus, we have to drop the sleepq spin lock while * we handle those requests. This is safe since we have placed our * thread on the sleep queue already. */ #ifdef KTRACE if (KTRPOINT(td, KTR_CSW)) { sleepq_release(ident); ktrcsw(1, 0, wmesg); sleepq_lock(ident); } #endif #ifdef WITNESS sleepq_release(ident); WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"", wmesg); sleepq_lock(ident); #endif if (sbt != 0) rval = sleepq_timedwait(ident, 0); else { sleepq_wait(ident, 0); rval = 0; } #ifdef KTRACE if (KTRPOINT(td, KTR_CSW)) ktrcsw(0, 0, wmesg); #endif PICKUP_GIANT(); mtx_lock_spin(mtx); WITNESS_RESTORE(&mtx->lock_object, mtx); return (rval); } /* * pause_sbt() delays the calling thread by the given signed binary * time. During cold bootup, pause_sbt() uses the DELAY() function * instead of the _sleep() function to do the waiting. The "sbt" * argument must be greater than or equal to zero. A "sbt" value of * zero is equivalent to a "sbt" value of one tick. */ int pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags) { KASSERT(sbt >= 0, ("pause_sbt: timeout must be >= 0")); /* silently convert invalid timeouts */ if (sbt == 0) sbt = tick_sbt; if ((cold && curthread == &thread0) || kdb_active || SCHEDULER_STOPPED()) { /* * We delay one second at a time to avoid overflowing the * system specific DELAY() function(s): */ while (sbt >= SBT_1S) { DELAY(1000000); sbt -= SBT_1S; } /* Do the delay remainder, if any */ sbt = howmany(sbt, SBT_1US); if (sbt > 0) DELAY(sbt); return (EWOULDBLOCK); } return (_sleep(&pause_wchan[curcpu], NULL, (flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags)); } /* * Make all threads sleeping on the specified identifier runnable. 
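 *
 * Illustrative usage (hypothetical consumer, not from this file): a waiter
 * typically sleeps on the address of the condition it is waiting for,
 *
 *	mtx_lock(&sc->lock);
 *	while (!sc->ready)
 *		msleep(&sc->ready, &sc->lock, PRIBIO, "scrdy", 0);
 *	mtx_unlock(&sc->lock);
 *
 * and the producer sets sc->ready under the same lock before calling
 * wakeup(&sc->ready).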
*/ void wakeup(const void *ident) { int wakeup_swapper; sleepq_lock(ident); wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0); sleepq_release(ident); if (wakeup_swapper) { KASSERT(ident != &proc0, ("wakeup and wakeup_swapper and proc0")); kick_proc0(); } } /* * Make a thread sleeping on the specified identifier runnable. * May wake more than one thread if a target thread is currently * swapped out. */ void wakeup_one(const void *ident) { int wakeup_swapper; sleepq_lock(ident); wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_DROP, 0, 0); if (wakeup_swapper) kick_proc0(); } void wakeup_any(const void *ident) { int wakeup_swapper; sleepq_lock(ident); wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR | SLEEPQ_DROP, 0, 0); if (wakeup_swapper) kick_proc0(); } /* * Signal sleeping waiters after the counter has reached zero. */ void _blockcount_wakeup(blockcount_t *bc, u_int old) { KASSERT(_BLOCKCOUNT_WAITERS(old), ("%s: no waiters on %p", __func__, bc)); if (atomic_cmpset_int(&bc->__count, _BLOCKCOUNT_WAITERS_FLAG, 0)) wakeup(bc); } /* * Wait for a wakeup or a signal. This does not guarantee that the count is * still zero on return. Callers wanting a precise answer should use * blockcount_wait() with an interlock. * * If there is no work to wait for, return 0. If the sleep was interrupted by a * signal, return EINTR or ERESTART, and return EAGAIN otherwise. */ int _blockcount_sleep(blockcount_t *bc, struct lock_object *lock, const char *wmesg, int prio) { void *wchan; uintptr_t lock_state; u_int old; int ret; bool catch, drop; KASSERT(lock != &Giant.lock_object, ("%s: cannot use Giant as the interlock", __func__)); catch = (prio & PCATCH) != 0; drop = (prio & PDROP) != 0; prio &= PRIMASK; /* * Synchronize with the fence in blockcount_release(). If we end up * waiting, the sleepqueue lock acquisition will provide the required * side effects. * * If there is no work to wait for, but waiters are present, try to put * ourselves to sleep to avoid jumping ahead. */ if (atomic_load_acq_int(&bc->__count) == 0) { if (lock != NULL && drop) LOCK_CLASS(lock)->lc_unlock(lock); return (0); } lock_state = 0; wchan = bc; sleepq_lock(wchan); DROP_GIANT(); if (lock != NULL) lock_state = LOCK_CLASS(lock)->lc_unlock(lock); old = blockcount_read(bc); ret = 0; do { if (_BLOCKCOUNT_COUNT(old) == 0) { sleepq_release(wchan); goto out; } if (_BLOCKCOUNT_WAITERS(old)) break; } while (!atomic_fcmpset_int(&bc->__count, &old, old | _BLOCKCOUNT_WAITERS_FLAG)); sleepq_add(wchan, NULL, wmesg, catch ? SLEEPQ_INTERRUPTIBLE : 0, 0); if (catch) ret = sleepq_wait_sig(wchan, prio); else sleepq_wait(wchan, prio); if (ret == 0) ret = EAGAIN; out: PICKUP_GIANT(); if (lock != NULL && !drop) LOCK_CLASS(lock)->lc_lock(lock, lock_state); return (ret); } static void kdb_switch(void) { thread_unlock(curthread); kdb_backtrace(); kdb_reenter(); panic("%s: did not reenter debugger", __func__); } /* * The machine independent parts of context switching. * * The thread lock is required on entry and is no longer held on return. 
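 *
 * (The thread lock is a spin lock, so the caller entered a spinlock
 * section when acquiring it; the spinlock_exit() at the end of mi_switch()
 * balances that once the switch has completed.)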
*/ void mi_switch(int flags) { uint64_t runtime, new_switchtime; struct thread *td; td = curthread; /* XXX */ THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED); KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code")); #ifdef INVARIANTS if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td)) mtx_assert(&Giant, MA_NOTOWNED); #endif KASSERT(td->td_critnest == 1 || KERNEL_PANICKED(), ("mi_switch: switch in a critical section")); KASSERT((flags & (SW_INVOL | SW_VOL)) != 0, ("mi_switch: switch must be voluntary or involuntary")); /* * Don't perform context switches from the debugger. */ if (kdb_active) kdb_switch(); if (SCHEDULER_STOPPED_TD(td)) return; if (flags & SW_VOL) { td->td_ru.ru_nvcsw++; td->td_swvoltick = ticks; } else { td->td_ru.ru_nivcsw++; td->td_swinvoltick = ticks; } #ifdef SCHED_STATS SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]); #endif /* * Compute the amount of time during which the current * thread was running, and add that to its total so far. */ new_switchtime = cpu_ticks(); runtime = new_switchtime - PCPU_GET(switchtime); td->td_runtime += runtime; td->td_incruntime += runtime; PCPU_SET(switchtime, new_switchtime); td->td_generation++; /* bump preempt-detect counter */ VM_CNT_INC(v_swtch); PCPU_SET(switchticks, ticks); CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)", td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name); #ifdef KDTRACE_HOOKS if (SDT_PROBES_ENABLED() && ((flags & SW_PREEMPT) != 0 || ((flags & SW_INVOL) != 0 && (flags & SW_TYPE_MASK) == SWT_NEEDRESCHED))) SDT_PROBE0(sched, , , preempt); #endif sched_switch(td, flags); CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)", td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name); /* * If the last thread was exiting, finish cleaning it up. */ if ((td = PCPU_GET(deadthread))) { PCPU_SET(deadthread, NULL); thread_stash(td); } spinlock_exit(); } /* * Change thread state to be runnable, placing it on the run queue if * it is in memory. If it is swapped out, return true so our caller * will know to awaken the swapper. * * Requires the thread lock on entry, drops on exit. */ int setrunnable(struct thread *td, int srqflags) { int swapin; THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(td->td_proc->p_state != PRS_ZOMBIE, ("setrunnable: pid %d is a zombie", td->td_proc->p_pid)); swapin = 0; switch (TD_GET_STATE(td)) { case TDS_RUNNING: case TDS_RUNQ: break; case TDS_CAN_RUN: KASSERT((td->td_flags & TDF_INMEM) != 0, ("setrunnable: td %p not in mem, flags 0x%X inhibit 0x%X", td, td->td_flags, td->td_inhibitors)); /* unlocks thread lock according to flags */ sched_wakeup(td, srqflags); return (0); case TDS_INHIBITED: /* * If we are only inhibited because we are swapped out * arrange to swap in this process. */ if (td->td_inhibitors == TDI_SWAPPED && (td->td_flags & TDF_SWAPINREQ) == 0) { td->td_flags |= TDF_SWAPINREQ; swapin = 1; } break; default: panic("setrunnable: state 0x%x", TD_GET_STATE(td)); } if ((srqflags & (SRQ_HOLD | SRQ_HOLDTD)) == 0) thread_unlock(td); return (swapin); } /* * Compute a tenex style load average of a quantity on * 1, 5 and 15 minute intervals. */ static void loadav(void *arg) { int i, nrun; struct loadavg *avg; nrun = sched_load(); avg = &averunnable; for (i = 0; i < 3; i++) avg->ldavg[i] = (cexp[i] * avg->ldavg[i] + nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT; /* * Schedule the next update to occur after 5 seconds, but add a * random variation to avoid synchronisation with processes that * run at regular intervals. 
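 *
 * The callout interval below is drawn uniformly from [4s, 6s], averaging
 * the nominal 5 seconds without staying phase-locked to periodic
 * processes.  With a 5 second period the cexp[] constants above are
 * exp(-5s/1min), exp(-5s/5min) and exp(-5s/15min) in FSCALE fixed point,
 * so each update above computes
 *	ldavg = ldavg * exp(-t/T) + nrun * (1 - exp(-t/T))
 * which is the usual exponentially decayed load average.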
*/ callout_reset_sbt(&loadav_callout, SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US, loadav, NULL, C_DIRECT_EXEC | C_PREL(32)); } /* ARGSUSED */ static void synch_setup(void *dummy) { callout_init(&loadav_callout, 1); /* Kick off timeout driven events by calling first time. */ loadav(NULL); } int should_yield(void) { return ((u_int)ticks - (u_int)curthread->td_swvoltick >= hogticks); } void maybe_yield(void) { if (should_yield()) kern_yield(PRI_USER); } void kern_yield(int prio) { struct thread *td; td = curthread; DROP_GIANT(); thread_lock(td); if (prio == PRI_USER) prio = td->td_user_pri; if (prio >= 0) sched_prio(td, prio); mi_switch(SW_VOL | SWT_RELINQUISH); PICKUP_GIANT(); } /* * General purpose yield system call. */ int sys_yield(struct thread *td, struct yield_args *uap) { thread_lock(td); if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) sched_prio(td, PRI_MAX_TIMESHARE); mi_switch(SW_VOL | SWT_RELINQUISH); td->td_retval[0] = 0; return (0); } diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c index d9cf2784f642..992a093d30ab 100644 --- a/sys/kern/kern_timeout.c +++ b/sys/kern/kern_timeout.c @@ -1,1519 +1,1519 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_callout_profiling.h" #include "opt_ddb.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #include #include #endif #ifdef SMP #include #endif DPCPU_DECLARE(sbintime_t, hardclocktime); SDT_PROVIDER_DEFINE(callout_execute); SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *"); SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *"); #ifdef CALLOUT_PROFILING static int avg_depth; SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, "Average number of items examined per softclock call. Units = 1/1000"); static int avg_gcalls; SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, "Average number of Giant callouts made per softclock call. Units = 1/1000"); static int avg_lockcalls; SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, "Average number of lock callouts made per softclock call. Units = 1/1000"); static int avg_mpcalls; SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, "Average number of MP callouts made per softclock call. Units = 1/1000"); static int avg_depth_dir; SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0, "Average number of direct callouts examined per callout_process call. " "Units = 1/1000"); static int avg_lockcalls_dir; SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD, &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per " "callout_process call. Units = 1/1000"); static int avg_mpcalls_dir; SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir, 0, "Average number of MP direct callouts made per callout_process call. " "Units = 1/1000"); #endif static int ncallout; SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0, "Number of entries in callwheel and size of timeout() preallocation"); #ifdef RSS static int pin_default_swi = 1; static int pin_pcpu_swi = 1; #else static int pin_default_swi = 0; static int pin_pcpu_swi = 0; #endif SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi, 0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)"); SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi, 0, "Pin the per-CPU swis (except PCPU 0, which is also default)"); /* * TODO: * allocate more timeout table slots when table overflows. */ static u_int __read_mostly callwheelsize; static u_int __read_mostly callwheelmask; /* * The callout cpu exec entities represent informations necessary for * describing the state of callouts currently running on the CPU and the ones * necessary for migrating callouts to the new callout cpu. In particular, * the first entry of the array cc_exec_entity holds informations for callout * running in SWI thread context, while the second one holds informations * for callout running directly from hardware interrupt context. * The cached informations are very important for deferring migration when * the migrating callout is already running. 
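 * Throughout this file the "direct" argument selects between the two:
 * cc_exec_entity[0] (direct == 0) tracks callouts run from the softclock
 * SWI thread, while cc_exec_entity[1] (direct == 1) tracks callouts
 * scheduled with C_DIRECT_EXEC, which run straight from callout_process().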
*/ struct cc_exec { struct callout *cc_curr; callout_func_t *cc_drain; void *cc_last_func; void *cc_last_arg; #ifdef SMP callout_func_t *ce_migration_func; void *ce_migration_arg; sbintime_t ce_migration_time; sbintime_t ce_migration_prec; int ce_migration_cpu; #endif bool cc_cancel; bool cc_waiting; }; /* * There is one struct callout_cpu per cpu, holding all relevant * state for the callout processing thread on the individual CPU. */ struct callout_cpu { struct mtx_padalign cc_lock; struct cc_exec cc_exec_entity[2]; struct callout *cc_next; struct callout_list *cc_callwheel; struct callout_tailq cc_expireq; sbintime_t cc_firstevent; sbintime_t cc_lastscan; void *cc_cookie; u_int cc_bucket; u_int cc_inited; #ifdef KTR char cc_ktr_event_name[20]; #endif }; #define callout_migrating(c) ((c)->c_iflags & CALLOUT_DFRMIGRATION) #define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr #define cc_exec_last_func(cc, dir) cc->cc_exec_entity[dir].cc_last_func #define cc_exec_last_arg(cc, dir) cc->cc_exec_entity[dir].cc_last_arg #define cc_exec_drain(cc, dir) cc->cc_exec_entity[dir].cc_drain #define cc_exec_next(cc) cc->cc_next #define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel #define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting #ifdef SMP #define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func #define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg #define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu #define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time #define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec static struct callout_cpu cc_cpu[MAXCPU]; #define CPUBLOCK MAXCPU #define CC_CPU(cpu) (&cc_cpu[(cpu)]) #define CC_SELF() CC_CPU(PCPU_GET(cpuid)) #else static struct callout_cpu cc_cpu; #define CC_CPU(cpu) (&cc_cpu) #define CC_SELF() (&cc_cpu) #endif #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED) static int __read_mostly cc_default_cpu; static void callout_cpu_init(struct callout_cpu *cc, int cpu); static void softclock_call_cc(struct callout *c, struct callout_cpu *cc, #ifdef CALLOUT_PROFILING int *mpcalls, int *lockcalls, int *gcalls, #endif int direct); static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); /** * Locked by cc_lock: * cc_curr - If a callout is in progress, it is cc_curr. * If cc_curr is non-NULL, threads waiting in * callout_drain() will be woken up as soon as the * relevant callout completes. * cc_cancel - Changing to 1 with both callout_lock and cc_lock held * guarantees that the current callout will not run. * The softclock() function sets this to 0 before it * drops callout_lock to acquire c_lock, and it calls * the handler only if curr_cancelled is still 0 after * cc_lock is successfully acquired. * cc_waiting - If a thread is waiting in callout_drain(), then * callout_wait is nonzero. Set only when * cc_curr is non-NULL. */ /* * Resets the execution entity tied to a specific callout cpu. 
*/ static void cc_cce_cleanup(struct callout_cpu *cc, int direct) { cc_exec_curr(cc, direct) = NULL; cc_exec_cancel(cc, direct) = false; cc_exec_waiting(cc, direct) = false; #ifdef SMP cc_migration_cpu(cc, direct) = CPUBLOCK; cc_migration_time(cc, direct) = 0; cc_migration_prec(cc, direct) = 0; cc_migration_func(cc, direct) = NULL; cc_migration_arg(cc, direct) = NULL; #endif } /* * Checks if migration is requested by a specific callout cpu. */ static int cc_cce_migrating(struct callout_cpu *cc, int direct) { #ifdef SMP return (cc_migration_cpu(cc, direct) != CPUBLOCK); #else return (0); #endif } /* * Kernel low level callwheel initialization * called on the BSP during kernel startup. */ static void callout_callwheel_init(void *dummy) { struct callout_cpu *cc; int cpu; /* * Calculate the size of the callout wheel and the preallocated * timeout() structures. * XXX: Clip callout to result of previous function of maxusers * maximum 384. This is still huge, but acceptable. */ ncallout = imin(16 + maxproc + maxfiles, 18508); TUNABLE_INT_FETCH("kern.ncallout", &ncallout); /* * Calculate callout wheel size, should be next power of two higher * than 'ncallout'. */ callwheelsize = 1 << fls(ncallout); callwheelmask = callwheelsize - 1; /* * Fetch whether we're pinning the swi's or not. */ TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi); TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi); /* * Initialize callout wheels. The software interrupt threads * are created later. */ cc_default_cpu = PCPU_GET(cpuid); CPU_FOREACH(cpu) { cc = CC_CPU(cpu); callout_cpu_init(cc, cpu); } } SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL); /* * Initialize the per-cpu callout structures. */ static void callout_cpu_init(struct callout_cpu *cc, int cpu) { int i; mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); cc->cc_inited = 1; cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) * callwheelsize, M_CALLOUT, DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), M_WAITOK); for (i = 0; i < callwheelsize; i++) LIST_INIT(&cc->cc_callwheel[i]); TAILQ_INIT(&cc->cc_expireq); cc->cc_firstevent = SBT_MAX; for (i = 0; i < 2; i++) cc_cce_cleanup(cc, i); #ifdef KTR snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name), "callwheel cpu %d", cpu); #endif } #ifdef SMP /* * Switches the cpu tied to a specific callout. * The function expects a locked incoming callout cpu and returns with * locked outcoming callout cpu. */ static struct callout_cpu * callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu) { struct callout_cpu *new_cc; MPASS(c != NULL && cc != NULL); CC_LOCK_ASSERT(cc); /* * Avoid interrupts and preemption firing after the callout cpu * is blocked in order to avoid deadlocks as the new thread * may be willing to acquire the callout cpu lock. */ c->c_cpu = CPUBLOCK; spinlock_enter(); CC_UNLOCK(cc); new_cc = CC_CPU(new_cpu); CC_LOCK(new_cc); spinlock_exit(); c->c_cpu = new_cpu; return (new_cc); } #endif /* * Start softclock threads. 
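 * One "clock (N)" SWI thread is created per CPU below; callouts that were
 * not scheduled with C_DIRECT_EXEC are queued on cc_expireq by
 * callout_process() and drained by softclock() in these threads, while
 * direct callouts execute from hardware interrupt context.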
*/ static void start_softclock(void *dummy) { struct callout_cpu *cc; char name[MAXCOMLEN]; int cpu; bool pin_swi; struct intr_event *ie; CPU_FOREACH(cpu) { cc = CC_CPU(cpu); snprintf(name, sizeof(name), "clock (%d)", cpu); ie = NULL; if (swi_add(&ie, name, softclock, cc, SWI_CLOCK, INTR_MPSAFE, &cc->cc_cookie)) panic("died while creating standard software ithreads"); if (cpu == cc_default_cpu) pin_swi = pin_default_swi; else pin_swi = pin_pcpu_swi; if (pin_swi && (intr_event_bind(ie, cpu) != 0)) { printf("%s: %s clock couldn't be pinned to cpu %d\n", __func__, cpu == cc_default_cpu ? "default" : "per-cpu", cpu); } } } SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); #define CC_HASH_SHIFT 8 static inline u_int callout_hash(sbintime_t sbt) { return (sbt >> (32 - CC_HASH_SHIFT)); } static inline u_int callout_get_bucket(sbintime_t sbt) { return (callout_hash(sbt) & callwheelmask); } void callout_process(sbintime_t now) { struct callout *tmp, *tmpn; struct callout_cpu *cc; struct callout_list *sc; sbintime_t first, last, max, tmp_max; uint32_t lookahead; u_int firstb, lastb, nowb; #ifdef CALLOUT_PROFILING int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0; #endif cc = CC_SELF(); mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); /* Compute the buckets of the last scan and present times. */ firstb = callout_hash(cc->cc_lastscan); cc->cc_lastscan = now; nowb = callout_hash(now); /* Compute the last bucket and minimum time of the bucket after it. */ if (nowb == firstb) lookahead = (SBT_1S / 16); else if (nowb - firstb == 1) lookahead = (SBT_1S / 8); else lookahead = (SBT_1S / 2); first = last = now; first += (lookahead / 2); last += lookahead; last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT)); lastb = callout_hash(last) - 1; max = last; /* * Check if we wrapped around the entire wheel from the last scan. * In case, we need to scan entirely the wheel for pending callouts. */ if (lastb - firstb >= callwheelsize) { lastb = firstb + callwheelsize - 1; if (nowb - firstb >= callwheelsize) nowb = lastb; } /* Iterate callwheel from firstb to nowb and then up to lastb. */ do { sc = &cc->cc_callwheel[firstb & callwheelmask]; tmp = LIST_FIRST(sc); while (tmp != NULL) { /* Run the callout if present time within allowed. */ if (tmp->c_time <= now) { /* * Consumer told us the callout may be run * directly from hardware interrupt context. */ if (tmp->c_iflags & CALLOUT_DIRECT) { #ifdef CALLOUT_PROFILING ++depth_dir; #endif cc_exec_next(cc) = LIST_NEXT(tmp, c_links.le); cc->cc_bucket = firstb & callwheelmask; LIST_REMOVE(tmp, c_links.le); softclock_call_cc(tmp, cc, #ifdef CALLOUT_PROFILING &mpcalls_dir, &lockcalls_dir, NULL, #endif 1); tmp = cc_exec_next(cc); cc_exec_next(cc) = NULL; } else { tmpn = LIST_NEXT(tmp, c_links.le); LIST_REMOVE(tmp, c_links.le); TAILQ_INSERT_TAIL(&cc->cc_expireq, tmp, c_links.tqe); tmp->c_iflags |= CALLOUT_PROCESSED; tmp = tmpn; } continue; } /* Skip events from distant future. */ if (tmp->c_time >= max) goto next; /* * Event minimal time is bigger than present maximal * time, so it cannot be aggregated. */ if (tmp->c_time > last) { lastb = nowb; goto next; } /* Update first and last time, respecting this event. */ if (tmp->c_time < first) first = tmp->c_time; tmp_max = tmp->c_time + tmp->c_precision; if (tmp_max < last) last = tmp_max; next: tmp = LIST_NEXT(tmp, c_links.le); } /* Proceed with the next bucket. */ firstb++; /* * Stop if we looked after present time and found * some event we can't execute at now. 
* Stop if we looked far enough into the future. */ } while (((int)(firstb - lastb)) <= 0); cc->cc_firstevent = last; cpu_new_callout(curcpu, last, first); #ifdef CALLOUT_PROFILING avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8; avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8; avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8; #endif mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); /* * swi_sched acquires the thread lock, so we don't want to call it * with cc_lock held; incorrect locking order. */ if (!TAILQ_EMPTY(&cc->cc_expireq)) swi_sched(cc->cc_cookie, 0); } static struct callout_cpu * callout_lock(struct callout *c) { struct callout_cpu *cc; int cpu; for (;;) { cpu = c->c_cpu; #ifdef SMP if (cpu == CPUBLOCK) { while (c->c_cpu == CPUBLOCK) cpu_spinwait(); continue; } #endif cc = CC_CPU(cpu); CC_LOCK(cc); if (cpu == c->c_cpu) break; CC_UNLOCK(cc); } return (cc); } static void callout_cc_add(struct callout *c, struct callout_cpu *cc, sbintime_t sbt, sbintime_t precision, void (*func)(void *), void *arg, int cpu, int flags) { int bucket; CC_LOCK_ASSERT(cc); if (sbt < cc->cc_lastscan) sbt = cc->cc_lastscan; c->c_arg = arg; c->c_iflags |= CALLOUT_PENDING; c->c_iflags &= ~CALLOUT_PROCESSED; c->c_flags |= CALLOUT_ACTIVE; if (flags & C_DIRECT_EXEC) c->c_iflags |= CALLOUT_DIRECT; c->c_func = func; c->c_time = sbt; c->c_precision = precision; bucket = callout_get_bucket(c->c_time); CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x", c, (int)(c->c_precision >> 32), (u_int)(c->c_precision & 0xffffffff)); LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le); if (cc->cc_bucket == bucket) cc_exec_next(cc) = c; /* * Inform the eventtimers(4) subsystem there's a new callout * that has been inserted, but only if really required. */ if (SBT_MAX - c->c_time < c->c_precision) c->c_precision = SBT_MAX - c->c_time; sbt = c->c_time + c->c_precision; if (sbt < cc->cc_firstevent) { cc->cc_firstevent = sbt; cpu_new_callout(cpu, sbt, c->c_time); } } static void softclock_call_cc(struct callout *c, struct callout_cpu *cc, #ifdef CALLOUT_PROFILING int *mpcalls, int *lockcalls, int *gcalls, #endif int direct) { struct rm_priotracker tracker; callout_func_t *c_func, *drain; void *c_arg; struct lock_class *class; struct lock_object *c_lock; uintptr_t lock_status; int c_iflags; #ifdef SMP struct callout_cpu *new_cc; callout_func_t *new_func; void *new_arg; int flags, new_cpu; sbintime_t new_prec, new_time; #endif #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) sbintime_t sbt1, sbt2; struct timespec ts2; static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */ static callout_func_t *lastfunc; #endif KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING, ("softclock_call_cc: pend %p %x", c, c->c_iflags)); KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE, ("softclock_call_cc: act %p %x", c, c->c_flags)); class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL; lock_status = 0; if (c->c_flags & CALLOUT_SHAREDLOCK) { if (class == &lock_class_rm) lock_status = (uintptr_t)&tracker; else lock_status = 1; } c_lock = c->c_lock; c_func = c->c_func; c_arg = c->c_arg; c_iflags = c->c_iflags; c->c_iflags &= ~CALLOUT_PENDING; cc_exec_curr(cc, direct) = c; cc_exec_last_func(cc, direct) = c_func; cc_exec_last_arg(cc, direct) = c_arg; cc_exec_cancel(cc, direct) = false; cc_exec_drain(cc, direct) = NULL; CC_UNLOCK(cc); if (c_lock != NULL) { class->lc_lock(c_lock, lock_status); /* * The callout may have been cancelled * while we switched locks. 
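 * cc_exec_cancel(cc, direct) was cleared above before cc_lock was dropped;
 * if it is set again by the time we hold c_lock, callout_stop() or
 * callout_reset() ran in that window and the handler must not be invoked.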
*/ if (cc_exec_cancel(cc, direct)) { class->lc_unlock(c_lock); goto skip; } /* The callout cannot be stopped now. */ cc_exec_cancel(cc, direct) = true; if (c_lock == &Giant.lock_object) { #ifdef CALLOUT_PROFILING (*gcalls)++; #endif CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p", c, c_func, c_arg); } else { #ifdef CALLOUT_PROFILING (*lockcalls)++; #endif CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p", c, c_func, c_arg); } } else { #ifdef CALLOUT_PROFILING (*mpcalls)++; #endif CTR3(KTR_CALLOUT, "callout %p func %p arg %p", c, c_func, c_arg); } KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running", "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct); #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) sbt1 = sbinuptime(); #endif THREAD_NO_SLEEPING(); SDT_PROBE1(callout_execute, , , callout__start, c); c_func(c_arg); SDT_PROBE1(callout_execute, , , callout__end, c); THREAD_SLEEPING_OK(); #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) sbt2 = sbinuptime(); sbt2 -= sbt1; if (sbt2 > maxdt) { if (lastfunc != c_func || sbt2 > maxdt * 2) { ts2 = sbttots(sbt2); printf( "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec); } maxdt = sbt2; lastfunc = c_func; } #endif KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle"); CTR1(KTR_CALLOUT, "callout %p finished", c); if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0) class->lc_unlock(c_lock); skip: CC_LOCK(cc); KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr")); cc_exec_curr(cc, direct) = NULL; if (cc_exec_drain(cc, direct)) { drain = cc_exec_drain(cc, direct); cc_exec_drain(cc, direct) = NULL; CC_UNLOCK(cc); drain(c_arg); CC_LOCK(cc); } if (cc_exec_waiting(cc, direct)) { /* * There is someone waiting for the * callout to complete. * If the callout was scheduled for * migration just cancel it. */ if (cc_cce_migrating(cc, direct)) { cc_cce_cleanup(cc, direct); /* * It should be assert here that the callout is not * destroyed but that is not easy. */ c->c_iflags &= ~CALLOUT_DFRMIGRATION; } cc_exec_waiting(cc, direct) = false; CC_UNLOCK(cc); wakeup(&cc_exec_waiting(cc, direct)); CC_LOCK(cc); } else if (cc_cce_migrating(cc, direct)) { #ifdef SMP /* * If the callout was scheduled for * migration just perform it now. */ new_cpu = cc_migration_cpu(cc, direct); new_time = cc_migration_time(cc, direct); new_prec = cc_migration_prec(cc, direct); new_func = cc_migration_func(cc, direct); new_arg = cc_migration_arg(cc, direct); cc_cce_cleanup(cc, direct); /* * It should be assert here that the callout is not destroyed * but that is not easy. * * As first thing, handle deferred callout stops. */ if (!callout_migrating(c)) { CTR3(KTR_CALLOUT, "deferred cancelled %p func %p arg %p", c, new_func, new_arg); return; } c->c_iflags &= ~CALLOUT_DFRMIGRATION; new_cc = callout_cpu_switch(c, cc, new_cpu); flags = (direct) ? C_DIRECT_EXEC : 0; callout_cc_add(c, new_cc, new_time, new_prec, new_func, new_arg, new_cpu, flags); CC_UNLOCK(new_cc); CC_LOCK(cc); #else panic("migration should not happen"); #endif } } /* * The callout mechanism is based on the work of Adam M. Costello and * George Varghese, published in a technical report entitled "Redesigning * the BSD Callout and Timer Facilities" and modified slightly for inclusion * in FreeBSD by Justin T. Gibbs. The original work on the data structures * used in this implementation was published by G. Varghese and T. 
Lauck in * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for * the Efficient Implementation of a Timer Facility" in the Proceedings of * the 11th ACM Annual Symposium on Operating Systems Principles, * Austin, Texas Nov 1987. */ /* * Software (low priority) clock interrupt. * Run periodic events from timeout queue. */ void softclock(void *arg) { struct callout_cpu *cc; struct callout *c; #ifdef CALLOUT_PROFILING int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0; #endif cc = (struct callout_cpu *)arg; CC_LOCK(cc); while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) { TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); softclock_call_cc(c, cc, #ifdef CALLOUT_PROFILING &mpcalls, &lockcalls, &gcalls, #endif 0); #ifdef CALLOUT_PROFILING ++depth; #endif } #ifdef CALLOUT_PROFILING avg_depth += (depth * 1000 - avg_depth) >> 8; avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; #endif CC_UNLOCK(cc); } void callout_when(sbintime_t sbt, sbintime_t precision, int flags, sbintime_t *res, sbintime_t *prec_res) { sbintime_t to_sbt, to_pr; if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) { *res = sbt; *prec_res = precision; return; } if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt) sbt = tick_sbt; if ((flags & C_HARDCLOCK) != 0 || sbt >= sbt_tickthreshold) { /* * Obtain the time of the last hardclock() call on * this CPU directly from the kern_clocksource.c. * This value is per-CPU, but it is equal for all * active ones. */ #ifdef __LP64__ to_sbt = DPCPU_GET(hardclocktime); #else spinlock_enter(); to_sbt = DPCPU_GET(hardclocktime); spinlock_exit(); #endif if (cold && to_sbt == 0) to_sbt = sbinuptime(); if ((flags & C_HARDCLOCK) == 0) to_sbt += tick_sbt; } else to_sbt = sbinuptime(); if (SBT_MAX - to_sbt < sbt) to_sbt = SBT_MAX; else to_sbt += sbt; *res = to_sbt; to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp : sbt >> C_PRELGET(flags)); *prec_res = to_pr > precision ? to_pr : precision; } /* * New interface; clients allocate their own callout structures. * * callout_reset() - establish or change a timeout * callout_stop() - disestablish a timeout * callout_init() - initialize a callout structure so that it can * safely be passed to callout_reset() and callout_stop() * * defines three convenience macros: * * callout_active() - returns truth if callout has not been stopped, * drained, or deactivated since the last time the callout was * reset. * callout_pending() - returns truth if callout is still waiting for timeout * callout_deactivate() - marks the callout as having been serviced */ int callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec, callout_func_t *ftn, void *arg, int cpu, int flags) { sbintime_t to_sbt, precision; struct callout_cpu *cc; int cancelled, direct; int ignore_cpu=0; cancelled = 0; if (cpu == -1) { ignore_cpu = 1; } else if ((cpu >= MAXCPU) || ((CC_CPU(cpu))->cc_inited == 0)) { /* Invalid CPU spec */ panic("Invalid CPU in callout %d", cpu); } callout_when(sbt, prec, flags, &to_sbt, &precision); /* * This flag used to be added by callout_cc_add, but the * first time you call this we could end up with the * wrong direct flag if we don't do it before we add. 
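 * direct == 1 selects the hardware interrupt execution entity; the
 * assertion below requires such a callout to be either lockless or
 * protected by a spin-class lock, since its handler runs from interrupt
 * context.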
*/ if (flags & C_DIRECT_EXEC) { direct = 1; } else { direct = 0; } - KASSERT(!direct || c->c_lock == NULL, - ("%s: direct callout %p has lock", __func__, c)); + KASSERT(!direct || c->c_lock == NULL || + (LOCK_CLASS(c->c_lock)->lc_flags & LC_SPINLOCK), + ("%s: direct callout %p has non-spin lock", __func__, c)); cc = callout_lock(c); /* * Don't allow migration if the user does not care. */ if (ignore_cpu) { cpu = c->c_cpu; } if (cc_exec_curr(cc, direct) == c) { /* * We're being asked to reschedule a callout which is * currently in progress. If there is a lock then we * can cancel the callout if it has not really started. */ if (c->c_lock != NULL && !cc_exec_cancel(cc, direct)) cancelled = cc_exec_cancel(cc, direct) = true; if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) { /* * Someone has called callout_drain to kill this * callout. Don't reschedule. */ CTR4(KTR_CALLOUT, "%s %p func %p arg %p", cancelled ? "cancelled" : "failed to cancel", c, c->c_func, c->c_arg); CC_UNLOCK(cc); return (cancelled); } #ifdef SMP if (callout_migrating(c)) { /* * This only occurs when a second callout_reset_sbt_on * is made after a previous one moved it into * deferred migration (below). Note we do *not* change * the prev_cpu even though the previous target may * be different. */ cc_migration_cpu(cc, direct) = cpu; cc_migration_time(cc, direct) = to_sbt; cc_migration_prec(cc, direct) = precision; cc_migration_func(cc, direct) = ftn; cc_migration_arg(cc, direct) = arg; cancelled = 1; CC_UNLOCK(cc); return (cancelled); } #endif } if (c->c_iflags & CALLOUT_PENDING) { if ((c->c_iflags & CALLOUT_PROCESSED) == 0) { if (cc_exec_next(cc) == c) cc_exec_next(cc) = LIST_NEXT(c, c_links.le); LIST_REMOVE(c, c_links.le); } else { TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); } cancelled = 1; c->c_iflags &= ~ CALLOUT_PENDING; c->c_flags &= ~ CALLOUT_ACTIVE; } #ifdef SMP /* * If the callout must migrate try to perform it immediately. * If the callout is currently running, just defer the migration * to a more appropriate moment. */ if (c->c_cpu != cpu) { if (cc_exec_curr(cc, direct) == c) { /* * Pending will have been removed since we are * actually executing the callout on another * CPU. That callout should be waiting on the * lock the caller holds. If we set both * active/and/pending after we return and the * lock on the executing callout proceeds, it * will then see pending is true and return. * At the return from the actual callout execution * the migration will occur in softclock_call_cc * and this new callout will be placed on the * new CPU via a call to callout_cpu_switch() which * will get the lock on the right CPU followed * by a call callout_cc_add() which will add it there. * (see above in softclock_call_cc()). */ cc_migration_cpu(cc, direct) = cpu; cc_migration_time(cc, direct) = to_sbt; cc_migration_prec(cc, direct) = precision; cc_migration_func(cc, direct) = ftn; cc_migration_arg(cc, direct) = arg; c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING); c->c_flags |= CALLOUT_ACTIVE; CTR6(KTR_CALLOUT, "migration of %p func %p arg %p in %d.%08x to %u deferred", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), (u_int)(to_sbt & 0xffffffff), cpu); CC_UNLOCK(cc); return (cancelled); } cc = callout_cpu_switch(c, cc, cpu); } #endif callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", cancelled ? 
"re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), (u_int)(to_sbt & 0xffffffff)); CC_UNLOCK(cc); return (cancelled); } /* * Common idioms that can be optimized in the future. */ int callout_schedule_on(struct callout *c, int to_ticks, int cpu) { return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); } int callout_schedule(struct callout *c, int to_ticks) { return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); } int _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain) { struct callout_cpu *cc, *old_cc; struct lock_class *class; int direct, sq_locked, use_lock; int cancelled, not_on_a_list; if ((flags & CS_DRAIN) != 0) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock, "calling %s", __func__); KASSERT((flags & CS_DRAIN) == 0 || drain == NULL, ("Cannot set drain callback and CS_DRAIN flag at the same time")); /* * Some old subsystems don't hold Giant while running a callout_stop(), * so just discard this check for the moment. */ if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) { if (c->c_lock == &Giant.lock_object) use_lock = mtx_owned(&Giant); else { use_lock = 1; class = LOCK_CLASS(c->c_lock); class->lc_assert(c->c_lock, LA_XLOCKED); } } else use_lock = 0; if (c->c_iflags & CALLOUT_DIRECT) { direct = 1; } else { direct = 0; } sq_locked = 0; old_cc = NULL; again: cc = callout_lock(c); if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) == (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) && ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) { /* * Special case where this slipped in while we * were migrating *as* the callout is about to * execute. The caller probably holds the lock * the callout wants. * * Get rid of the migration first. Then set * the flag that tells this code *not* to * try to remove it from any lists (its not * on one yet). When the callout wheel runs, * it will ignore this callout. */ c->c_iflags &= ~CALLOUT_PENDING; c->c_flags &= ~CALLOUT_ACTIVE; not_on_a_list = 1; } else { not_on_a_list = 0; } /* * If the callout was migrating while the callout cpu lock was * dropped, just drop the sleepqueue lock and check the states * again. */ if (sq_locked != 0 && cc != old_cc) { #ifdef SMP CC_UNLOCK(cc); sleepq_release(&cc_exec_waiting(old_cc, direct)); sq_locked = 0; old_cc = NULL; goto again; #else panic("migration should not happen"); #endif } /* * If the callout is running, try to stop it or drain it. */ if (cc_exec_curr(cc, direct) == c) { /* * Succeed we to stop it or not, we must clear the * active flag - this is what API users expect. If we're * draining and the callout is currently executing, first wait * until it finishes. */ if ((flags & CS_DRAIN) == 0) c->c_flags &= ~CALLOUT_ACTIVE; if ((flags & CS_DRAIN) != 0) { /* * The current callout is running (or just * about to run) and blocking is allowed, so * just wait for the current invocation to * finish. */ if (cc_exec_curr(cc, direct) == c) { /* * Use direct calls to sleepqueue interface * instead of cv/msleep in order to avoid * a LOR between cc_lock and sleepqueue * chain spinlocks. This piece of code * emulates a msleep_spin() call actually. * * If we already have the sleepqueue chain * locked, then we can safely block. If we * don't already have it locked, however, * we have to drop the cc_lock to lock * it. This opens several races, so we * restart at the beginning once we have * both locks. If nothing has changed, then * we will end up back here with sq_locked * set. 
*/ if (!sq_locked) { CC_UNLOCK(cc); sleepq_lock( &cc_exec_waiting(cc, direct)); sq_locked = 1; old_cc = cc; goto again; } /* * Migration could be cancelled here, but * as long as it is still not sure when it * will be packed up, just let softclock() * take care of it. */ cc_exec_waiting(cc, direct) = true; DROP_GIANT(); CC_UNLOCK(cc); sleepq_add( &cc_exec_waiting(cc, direct), &cc->cc_lock.lock_object, "codrain", SLEEPQ_SLEEP, 0); sleepq_wait( &cc_exec_waiting(cc, direct), 0); sq_locked = 0; old_cc = NULL; /* Reacquire locks previously released. */ PICKUP_GIANT(); goto again; } c->c_flags &= ~CALLOUT_ACTIVE; } else if (use_lock && !cc_exec_cancel(cc, direct) && (drain == NULL)) { /* * The current callout is waiting for its * lock which we hold. Cancel the callout * and return. After our caller drops the * lock, the callout will be skipped in * softclock(). This *only* works with a * callout_stop() *not* callout_drain() or * callout_async_drain(). */ cc_exec_cancel(cc, direct) = true; CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", c, c->c_func, c->c_arg); KASSERT(!cc_cce_migrating(cc, direct), ("callout wrongly scheduled for migration")); if (callout_migrating(c)) { c->c_iflags &= ~CALLOUT_DFRMIGRATION; #ifdef SMP cc_migration_cpu(cc, direct) = CPUBLOCK; cc_migration_time(cc, direct) = 0; cc_migration_prec(cc, direct) = 0; cc_migration_func(cc, direct) = NULL; cc_migration_arg(cc, direct) = NULL; #endif } CC_UNLOCK(cc); KASSERT(!sq_locked, ("sleepqueue chain locked")); return (1); } else if (callout_migrating(c)) { /* * The callout is currently being serviced * and the "next" callout is scheduled at * its completion with a migration. We remove * the migration flag so it *won't* get rescheduled, * but we can't stop the one thats running so * we return 0. */ c->c_iflags &= ~CALLOUT_DFRMIGRATION; #ifdef SMP /* * We can't call cc_cce_cleanup here since * if we do it will remove .ce_curr and * its still running. This will prevent a * reschedule of the callout when the * execution completes. */ cc_migration_cpu(cc, direct) = CPUBLOCK; cc_migration_time(cc, direct) = 0; cc_migration_prec(cc, direct) = 0; cc_migration_func(cc, direct) = NULL; cc_migration_arg(cc, direct) = NULL; #endif CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", c, c->c_func, c->c_arg); if (drain) { KASSERT(cc_exec_drain(cc, direct) == NULL, ("callout drain function already set to %p", cc_exec_drain(cc, direct))); cc_exec_drain(cc, direct) = drain; } CC_UNLOCK(cc); return ((flags & CS_EXECUTING) != 0); } else { CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", c, c->c_func, c->c_arg); if (drain) { KASSERT(cc_exec_drain(cc, direct) == NULL, ("callout drain function already set to %p", cc_exec_drain(cc, direct))); cc_exec_drain(cc, direct) = drain; } } KASSERT(!sq_locked, ("sleepqueue chain still locked")); cancelled = ((flags & CS_EXECUTING) != 0); } else cancelled = 1; if (sq_locked) sleepq_release(&cc_exec_waiting(cc, direct)); if ((c->c_iflags & CALLOUT_PENDING) == 0) { CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", c, c->c_func, c->c_arg); /* * For not scheduled and not executing callout return * negative value. 
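 *
 * Illustration only (hypothetical softc, lock and handler names):
 * the resulting return convention for a plain callout_stop() is
 *
 *	how = callout_stop(&sc->sc_timer);
 *	if (how > 0)
 *		... a pending callout was cancelled before it ran ...
 *	else if (how == 0)
 *		... the handler is running or has already run ...
 *	else
 *		... the callout was not scheduled at all ...
 *
 * and a common teardown sequence stops further rescheduling under
 * the associated lock, then drops it and sleeps in callout_drain()
 * (the CS_DRAIN path above) until any late handler has finished:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_detaching = true;
 *	callout_stop(&sc->sc_timer);
 *	mtx_unlock(&sc->sc_mtx);
 *	callout_drain(&sc->sc_timer);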
*/ if (cc_exec_curr(cc, direct) != c) cancelled = -1; CC_UNLOCK(cc); return (cancelled); } c->c_iflags &= ~CALLOUT_PENDING; c->c_flags &= ~CALLOUT_ACTIVE; CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", c, c->c_func, c->c_arg); if (not_on_a_list == 0) { if ((c->c_iflags & CALLOUT_PROCESSED) == 0) { if (cc_exec_next(cc) == c) cc_exec_next(cc) = LIST_NEXT(c, c_links.le); LIST_REMOVE(c, c_links.le); } else { TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); } } CC_UNLOCK(cc); return (cancelled); } void callout_init(struct callout *c, int mpsafe) { bzero(c, sizeof *c); if (mpsafe) { c->c_lock = NULL; c->c_iflags = CALLOUT_RETURNUNLOCKED; } else { c->c_lock = &Giant.lock_object; c->c_iflags = 0; } c->c_cpu = cc_default_cpu; } void _callout_init_lock(struct callout *c, struct lock_object *lock, int flags) { bzero(c, sizeof *c); c->c_lock = lock; KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, ("callout_init_lock: bad flags %d", flags)); KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); - KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & - (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", - __func__)); + KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE), + ("%s: callout %p has sleepable lock", __func__, c)); c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); c->c_cpu = cc_default_cpu; } static int flssbt(sbintime_t sbt) { sbt += (uint64_t)sbt >> 1; if (sizeof(long) >= sizeof(sbintime_t)) return (flsl(sbt)); if (sbt >= SBT_1S) return (flsl(((uint64_t)sbt) >> 32) + 32); return (flsl(sbt)); } /* * Dump immediate statistic snapshot of the scheduled callouts. */ static int sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS) { struct callout *tmp; struct callout_cpu *cc; struct callout_list *sc; sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t; int ct[64], cpr[64], ccpbk[32]; int error, val, i, count, tcum, pcum, maxc, c, medc; int cpu; val = 0; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); count = maxc = 0; st = spr = maxt = maxpr = 0; bzero(ccpbk, sizeof(ccpbk)); bzero(ct, sizeof(ct)); bzero(cpr, sizeof(cpr)); now = sbinuptime(); CPU_FOREACH(cpu) { cc = CC_CPU(cpu); CC_LOCK(cc); for (i = 0; i < callwheelsize; i++) { sc = &cc->cc_callwheel[i]; c = 0; LIST_FOREACH(tmp, sc, c_links.le) { c++; t = tmp->c_time - now; if (t < 0) t = 0; st += t / SBT_1US; spr += tmp->c_precision / SBT_1US; if (t > maxt) maxt = t; if (tmp->c_precision > maxpr) maxpr = tmp->c_precision; ct[flssbt(t)]++; cpr[flssbt(tmp->c_precision)]++; } if (c > maxc) maxc = c; ccpbk[fls(c + c / 2)]++; count += c; } CC_UNLOCK(cc); } for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++) tcum += ct[i]; medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++) pcum += cpr[i]; medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; for (i = 0, c = 0; i < 32 && c < count / 2; i++) c += ccpbk[i]; medc = (i >= 2) ? 
(1 << (i - 2)) : 0; printf("Scheduled callouts statistic snapshot:\n"); printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n", count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT); printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n", medc, count / callwheelsize / mp_ncpus, (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000, maxc); printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32, (st / count) / 1000000, (st / count) % 1000000, maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32); printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32, (spr / count) / 1000000, (spr / count) % 1000000, maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32); printf(" Distribution: \tbuckets\t time\t tcum\t" " prec\t pcum\n"); for (i = 0, tcum = pcum = 0; i < 64; i++) { if (ct[i] == 0 && cpr[i] == 0) continue; t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0; tcum += ct[i]; pcum += cpr[i]; printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n", t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32, i - 1 - (32 - CC_HASH_SHIFT), ct[i], tcum, cpr[i], pcum); } return (error); } SYSCTL_PROC(_kern, OID_AUTO, callout_stat, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, sysctl_kern_callout_stat, "I", "Dump immediate statistic snapshot of the scheduled callouts"); #ifdef DDB static void _show_callout(struct callout *c) { db_printf("callout %p\n", c); #define C_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, c->e); db_printf(" &c_links = %p\n", &(c->c_links)); C_DB_PRINTF("%" PRId64, c_time); C_DB_PRINTF("%" PRId64, c_precision); C_DB_PRINTF("%p", c_arg); C_DB_PRINTF("%p", c_func); C_DB_PRINTF("%p", c_lock); C_DB_PRINTF("%#x", c_flags); C_DB_PRINTF("%#x", c_iflags); C_DB_PRINTF("%d", c_cpu); #undef C_DB_PRINTF } DB_SHOW_COMMAND(callout, db_show_callout) { if (!have_addr) { db_printf("usage: show callout \n"); return; } _show_callout((struct callout *)addr); } static void _show_last_callout(int cpu, int direct, const char *dirstr) { struct callout_cpu *cc; void *func, *arg; cc = CC_CPU(cpu); func = cc_exec_last_func(cc, direct); arg = cc_exec_last_arg(cc, direct); db_printf("cpu %d last%s callout function: %p ", cpu, dirstr, func); db_printsym((db_expr_t)func, DB_STGY_ANY); db_printf("\ncpu %d last%s callout argument: %p\n", cpu, dirstr, arg); } DB_SHOW_COMMAND(callout_last, db_show_callout_last) { int cpu, last; if (have_addr) { if (addr < 0 || addr > mp_maxid || CPU_ABSENT(addr)) { db_printf("no such cpu: %d\n", (int)addr); return; } cpu = last = addr; } else { cpu = 0; last = mp_maxid; } while (cpu <= last) { if (!CPU_ABSENT(cpu)) { _show_last_callout(cpu, 0, ""); _show_last_callout(cpu, 1, " direct"); } cpu++; } } #endif /* DDB */ diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c index 061361cc06d7..e43b09010761 100644 --- a/sys/kern/subr_taskqueue.c +++ b/sys/kern/subr_taskqueue.c @@ -1,882 +1,883 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues"); static void *taskqueue_giant_ih; static void *taskqueue_ih; static void taskqueue_fast_enqueue(void *); static void taskqueue_swi_enqueue(void *); static void taskqueue_swi_giant_enqueue(void *); struct taskqueue_busy { struct task *tb_running; u_int tb_seq; LIST_ENTRY(taskqueue_busy) tb_link; }; struct taskqueue { STAILQ_HEAD(, task) tq_queue; LIST_HEAD(, taskqueue_busy) tq_active; struct task *tq_hint; u_int tq_seq; int tq_callouts; struct mtx_padalign tq_mutex; taskqueue_enqueue_fn tq_enqueue; void *tq_context; char *tq_name; struct thread **tq_threads; int tq_tcount; int tq_spin; int tq_flags; taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS]; void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS]; }; #define TQ_FLAGS_ACTIVE (1 << 0) #define TQ_FLAGS_BLOCKED (1 << 1) #define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2) #define DT_CALLOUT_ARMED (1 << 0) #define DT_DRAIN_IN_PROGRESS (1 << 1) #define TQ_LOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_lock_spin(&(tq)->tq_mutex); \ else \ mtx_lock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_LOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_OWNED) #define TQ_UNLOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_unlock_spin(&(tq)->tq_mutex); \ else \ mtx_unlock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED) void _timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task, int priority, task_fn_t func, void *context) { TASK_INIT(&timeout_task->t, priority, func, context); callout_init_mtx(&timeout_task->c, &queue->tq_mutex, CALLOUT_RETURNUNLOCKED); timeout_task->q = queue; timeout_task->f = 0; } static __inline int TQ_SLEEP(struct taskqueue *tq, void *p, const char *wm) { if (tq->tq_spin) return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0)); return (msleep(p, &tq->tq_mutex, 0, wm, 0)); } static struct taskqueue * _taskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context, int mtxflags, const char *mtxname __unused) { struct taskqueue *queue; char *tq_name; tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO); if (tq_name == NULL) return (NULL); queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO); if (queue == NULL) { free(tq_name, M_TASKQUEUE); return (NULL); } snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) 
? name : "taskqueue"); STAILQ_INIT(&queue->tq_queue); LIST_INIT(&queue->tq_active); queue->tq_enqueue = enqueue; queue->tq_context = context; queue->tq_name = tq_name; queue->tq_spin = (mtxflags & MTX_SPIN) != 0; queue->tq_flags |= TQ_FLAGS_ACTIVE; if (enqueue == taskqueue_fast_enqueue || enqueue == taskqueue_swi_enqueue || enqueue == taskqueue_swi_giant_enqueue || enqueue == taskqueue_thread_enqueue) queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE; mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags); return (queue); } struct taskqueue * taskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { return _taskqueue_create(name, mflags, enqueue, context, MTX_DEF, name); } void taskqueue_set_callback(struct taskqueue *queue, enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback, void *context) { KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) && (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)), ("Callback type %d not valid, must be %d-%d", cb_type, TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX)); KASSERT((queue->tq_callbacks[cb_type] == NULL), ("Re-initialization of taskqueue callback?")); queue->tq_callbacks[cb_type] = callback; queue->tq_cb_contexts[cb_type] = context; } /* * Signal a taskqueue thread to terminate. */ static void taskqueue_terminate(struct thread **pp, struct taskqueue *tq) { while (tq->tq_tcount > 0 || tq->tq_callouts > 0) { wakeup(tq); TQ_SLEEP(tq, pp, "tq_destroy"); } } void taskqueue_free(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_ACTIVE; taskqueue_terminate(queue->tq_threads, queue); KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?")); KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks")); mtx_destroy(&queue->tq_mutex); free(queue->tq_threads, M_TASKQUEUE); free(queue->tq_name, M_TASKQUEUE); free(queue, M_TASKQUEUE); } static int taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task) { struct task *ins; struct task *prev; KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func")); /* * Count multiple enqueues. */ if (task->ta_pending) { if (task->ta_pending < USHRT_MAX) task->ta_pending++; TQ_UNLOCK(queue); return (0); } /* * Optimise cases when all tasks use small set of priorities. * In case of only one priority we always insert at the end. * In case of two tq_hint typically gives the insertion point. * In case of more then two tq_hint should halve the search. */ prev = STAILQ_LAST(&queue->tq_queue, task, ta_link); if (!prev || prev->ta_priority >= task->ta_priority) { STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link); } else { prev = queue->tq_hint; if (prev && prev->ta_priority >= task->ta_priority) { ins = STAILQ_NEXT(prev, ta_link); } else { prev = NULL; ins = STAILQ_FIRST(&queue->tq_queue); } for (; ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link)) if (ins->ta_priority < task->ta_priority) break; if (prev) { STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link); queue->tq_hint = task; } else STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link); } task->ta_pending = 1; if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0) TQ_UNLOCK(queue); if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) queue->tq_enqueue(queue->tq_context); if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0) TQ_UNLOCK(queue); /* Return with lock released. */ return (0); } int taskqueue_enqueue(struct taskqueue *queue, struct task *task) { int res; TQ_LOCK(queue); res = taskqueue_enqueue_locked(queue, task); /* The lock is released inside. 
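 *
 * As an aside, an illustration only (hypothetical task and handler
 * names) of the consumer-facing side of the queueing done above:
 *
 *	TASK_INIT(&sc->sc_task, 0, foo_handler, sc);
 *	taskqueue_enqueue(taskqueue_thread, &sc->sc_task);
 *
 * Re-enqueueing a task that is still queued only bumps ta_pending;
 * the handler later receives the accumulated count as its pending
 * argument.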
*/ return (res); } static void taskqueue_timeout_func(void *arg) { struct taskqueue *queue; struct timeout_task *timeout_task; timeout_task = arg; queue = timeout_task->q; KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout")); timeout_task->f &= ~DT_CALLOUT_ARMED; queue->tq_callouts--; taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t); /* The lock is released inside. */ } int taskqueue_enqueue_timeout_sbt(struct taskqueue *queue, struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags) { int res; TQ_LOCK(queue); KASSERT(timeout_task->q == NULL || timeout_task->q == queue, ("Migrated queue")); - KASSERT(!queue->tq_spin, ("Timeout for spin-queue")); timeout_task->q = queue; res = timeout_task->t.ta_pending; if (timeout_task->f & DT_DRAIN_IN_PROGRESS) { /* Do nothing */ TQ_UNLOCK(queue); res = -1; } else if (sbt == 0) { taskqueue_enqueue_locked(queue, &timeout_task->t); /* The lock is released inside. */ } else { if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) { res++; } else { queue->tq_callouts++; timeout_task->f |= DT_CALLOUT_ARMED; if (sbt < 0) sbt = -sbt; /* Ignore overflow. */ } if (sbt > 0) { + if (queue->tq_spin) + flags |= C_DIRECT_EXEC; callout_reset_sbt(&timeout_task->c, sbt, pr, taskqueue_timeout_func, timeout_task, flags); } TQ_UNLOCK(queue); } return (res); } int taskqueue_enqueue_timeout(struct taskqueue *queue, struct timeout_task *ttask, int ticks) { return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt, 0, C_HARDCLOCK)); } static void taskqueue_task_nop_fn(void *context, int pending) { } /* * Block until all currently queued tasks in this taskqueue * have begun execution. Tasks queued during execution of * this function are ignored. */ static int taskqueue_drain_tq_queue(struct taskqueue *queue) { struct task t_barrier; if (STAILQ_EMPTY(&queue->tq_queue)) return (0); /* * Enqueue our barrier after all current tasks, but with * the highest priority so that newly queued tasks cannot * pass it. Because of the high priority, we can not use * taskqueue_enqueue_locked directly (which drops the lock * anyway) so just insert it at tail while we have the * queue lock. */ TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier); STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link); queue->tq_hint = &t_barrier; t_barrier.ta_pending = 1; /* * Once the barrier has executed, all previously queued tasks * have completed or are currently executing. */ while (t_barrier.ta_pending != 0) TQ_SLEEP(queue, &t_barrier, "tq_qdrain"); return (1); } /* * Block until all currently executing tasks for this taskqueue * complete. Tasks that begin execution during the execution * of this function are ignored. */ static int taskqueue_drain_tq_active(struct taskqueue *queue) { struct taskqueue_busy *tb; u_int seq; if (LIST_EMPTY(&queue->tq_active)) return (0); /* Block taskq_terminate().*/ queue->tq_callouts++; /* Wait for any active task with sequence from the past. */ seq = queue->tq_seq; restart: LIST_FOREACH(tb, &queue->tq_active, tb_link) { if ((int)(tb->tb_seq - seq) <= 0) { TQ_SLEEP(queue, tb->tb_running, "tq_adrain"); goto restart; } } /* Release taskqueue_terminate(). 
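 *
 * As an aside, an illustration only (hypothetical names) of the
 * timeout-task path handled above: a deferred task is armed and
 * later torn down with
 *
 *	TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_tt, 0, foo_late_fn, sc);
 *	taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_tt, hz / 10);
 *	...
 *	taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt);
 *
 * On a spin taskqueue the backing callout is now dispatched with
 * C_DIRECT_EXEC, as added above.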
*/ queue->tq_callouts--; if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0) wakeup_one(queue->tq_threads); return (1); } void taskqueue_block(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags |= TQ_FLAGS_BLOCKED; TQ_UNLOCK(queue); } void taskqueue_unblock(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_BLOCKED; if (!STAILQ_EMPTY(&queue->tq_queue)) queue->tq_enqueue(queue->tq_context); TQ_UNLOCK(queue); } static void taskqueue_run_locked(struct taskqueue *queue) { struct epoch_tracker et; struct taskqueue_busy tb; struct task *task; bool in_net_epoch; int pending; KASSERT(queue != NULL, ("tq is NULL")); TQ_ASSERT_LOCKED(queue); tb.tb_running = NULL; LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link); in_net_epoch = false; while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) { STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link); if (queue->tq_hint == task) queue->tq_hint = NULL; pending = task->ta_pending; task->ta_pending = 0; tb.tb_running = task; tb.tb_seq = ++queue->tq_seq; TQ_UNLOCK(queue); KASSERT(task->ta_func != NULL, ("task->ta_func is NULL")); if (!in_net_epoch && TASK_IS_NET(task)) { in_net_epoch = true; NET_EPOCH_ENTER(et); } else if (in_net_epoch && !TASK_IS_NET(task)) { NET_EPOCH_EXIT(et); in_net_epoch = false; } task->ta_func(task->ta_context, pending); TQ_LOCK(queue); wakeup(task); } if (in_net_epoch) NET_EPOCH_EXIT(et); LIST_REMOVE(&tb, tb_link); } void taskqueue_run(struct taskqueue *queue) { TQ_LOCK(queue); taskqueue_run_locked(queue); TQ_UNLOCK(queue); } static int task_is_running(struct taskqueue *queue, struct task *task) { struct taskqueue_busy *tb; TQ_ASSERT_LOCKED(queue); LIST_FOREACH(tb, &queue->tq_active, tb_link) { if (tb->tb_running == task) return (1); } return (0); } /* * Only use this function in single threaded contexts. It returns * non-zero if the given task is either pending or running. Else the * task is idle and can be queued again or freed. */ int taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task) { int retval; TQ_LOCK(queue); retval = task->ta_pending > 0 || task_is_running(queue, task); TQ_UNLOCK(queue); return (retval); } static int taskqueue_cancel_locked(struct taskqueue *queue, struct task *task, u_int *pendp) { if (task->ta_pending > 0) { STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link); if (queue->tq_hint == task) queue->tq_hint = NULL; } if (pendp != NULL) *pendp = task->ta_pending; task->ta_pending = 0; return (task_is_running(queue, task) ? 
EBUSY : 0); } int taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp) { int error; TQ_LOCK(queue); error = taskqueue_cancel_locked(queue, task, pendp); TQ_UNLOCK(queue); return (error); } int taskqueue_cancel_timeout(struct taskqueue *queue, struct timeout_task *timeout_task, u_int *pendp) { u_int pending, pending1; int error; TQ_LOCK(queue); pending = !!(callout_stop(&timeout_task->c) > 0); error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1); if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) { timeout_task->f &= ~DT_CALLOUT_ARMED; queue->tq_callouts--; } TQ_UNLOCK(queue); if (pendp != NULL) *pendp = pending + pending1; return (error); } void taskqueue_drain(struct taskqueue *queue, struct task *task) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); while (task->ta_pending != 0 || task_is_running(queue, task)) TQ_SLEEP(queue, task, "tq_drain"); TQ_UNLOCK(queue); } void taskqueue_drain_all(struct taskqueue *queue) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); (void)taskqueue_drain_tq_queue(queue); (void)taskqueue_drain_tq_active(queue); TQ_UNLOCK(queue); } void taskqueue_drain_timeout(struct taskqueue *queue, struct timeout_task *timeout_task) { /* * Set flag to prevent timer from re-starting during drain: */ TQ_LOCK(queue); KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0, ("Drain already in progress")); timeout_task->f |= DT_DRAIN_IN_PROGRESS; TQ_UNLOCK(queue); callout_drain(&timeout_task->c); taskqueue_drain(queue, &timeout_task->t); /* * Clear flag to allow timer to re-start: */ TQ_LOCK(queue); timeout_task->f &= ~DT_DRAIN_IN_PROGRESS; TQ_UNLOCK(queue); } void taskqueue_quiesce(struct taskqueue *queue) { int ret; TQ_LOCK(queue); do { ret = taskqueue_drain_tq_queue(queue); if (ret == 0) ret = taskqueue_drain_tq_active(queue); } while (ret != 0); TQ_UNLOCK(queue); } static void taskqueue_swi_enqueue(void *context) { swi_sched(taskqueue_ih, 0); } static void taskqueue_swi_run(void *dummy) { taskqueue_run(taskqueue_swi); } static void taskqueue_swi_giant_enqueue(void *context) { swi_sched(taskqueue_giant_ih, 0); } static void taskqueue_swi_giant_run(void *dummy) { taskqueue_run(taskqueue_swi_giant); } static int _taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, cpuset_t *mask, struct proc *p, const char *name, va_list ap) { char ktname[MAXCOMLEN + 1]; struct thread *td; struct taskqueue *tq; int i, error; if (count <= 0) return (EINVAL); vsnprintf(ktname, sizeof(ktname), name, ap); tq = *tqp; tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE, M_NOWAIT | M_ZERO); if (tq->tq_threads == NULL) { printf("%s: no memory for %s threads\n", __func__, ktname); return (ENOMEM); } for (i = 0; i < count; i++) { if (count == 1) error = kthread_add(taskqueue_thread_loop, tqp, p, &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname); else error = kthread_add(taskqueue_thread_loop, tqp, p, &tq->tq_threads[i], RFSTOPPED, 0, "%s_%d", ktname, i); if (error) { /* should be ok to continue, taskqueue_free will dtrt */ printf("%s: kthread_add(%s): error %d", __func__, ktname, error); tq->tq_threads[i] = NULL; /* paranoid */ } else tq->tq_tcount++; } if (tq->tq_tcount == 0) { free(tq->tq_threads, M_TASKQUEUE); tq->tq_threads = NULL; return (ENOMEM); } for (i = 0; i < count; i++) { if (tq->tq_threads[i] == NULL) continue; td = tq->tq_threads[i]; if (mask) { error = cpuset_setthread(td->td_tid, mask); /* * Failing to pin is rarely an actual fatal 
error; * it'll just affect performance. */ if (error) printf("%s: curthread=%llu: can't pin; " "error=%d\n", __func__, (unsigned long long) td->td_tid, error); } thread_lock(td); sched_prio(td, pri); sched_add(td, SRQ_BORING); } return (0); } int taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _taskqueue_start_threads(tqp, count, pri, NULL, NULL, name, ap); va_end(ap); return (error); } int taskqueue_start_threads_in_proc(struct taskqueue **tqp, int count, int pri, struct proc *proc, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _taskqueue_start_threads(tqp, count, pri, NULL, proc, name, ap); va_end(ap); return (error); } int taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri, cpuset_t *mask, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _taskqueue_start_threads(tqp, count, pri, mask, NULL, name, ap); va_end(ap); return (error); } static inline void taskqueue_run_callback(struct taskqueue *tq, enum taskqueue_callback_type cb_type) { taskqueue_callback_fn tq_callback; TQ_ASSERT_UNLOCKED(tq); tq_callback = tq->tq_callbacks[cb_type]; if (tq_callback != NULL) tq_callback(tq->tq_cb_contexts[cb_type]); } void taskqueue_thread_loop(void *arg) { struct taskqueue **tqp, *tq; tqp = arg; tq = *tqp; taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT); TQ_LOCK(tq); while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) { /* XXX ? */ taskqueue_run_locked(tq); /* * Because taskqueue_run() can drop tq_mutex, we need to * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the * meantime, which means we missed a wakeup. */ if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0) break; TQ_SLEEP(tq, tq, "-"); } taskqueue_run_locked(tq); /* * This thread is on its way out, so just drop the lock temporarily * in order to call the shutdown callback. This allows the callback * to look at the taskqueue, even just before it dies. */ TQ_UNLOCK(tq); taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN); TQ_LOCK(tq); /* rendezvous with thread that asked us to terminate */ tq->tq_tcount--; wakeup_one(tq->tq_threads); TQ_UNLOCK(tq); kthread_exit(); } void taskqueue_thread_enqueue(void *context) { struct taskqueue **tqp, *tq; tqp = context; tq = *tqp; wakeup_any(tq); } TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL, swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, INTR_MPSAFE, &taskqueue_ih)); TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL, swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run, NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih)); TASKQUEUE_DEFINE_THREAD(thread); struct taskqueue * taskqueue_create_fast(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { return _taskqueue_create(name, mflags, enqueue, context, MTX_SPIN, "fast_taskqueue"); } static void *taskqueue_fast_ih; static void taskqueue_fast_enqueue(void *context) { swi_sched(taskqueue_fast_ih, 0); } static void taskqueue_fast_run(void *dummy) { taskqueue_run(taskqueue_fast); } TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL, swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL, SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih)); int taskqueue_member(struct taskqueue *queue, struct thread *td) { int i, j, ret = 0; for (i = 0, j = 0; ; i++) { if (queue->tq_threads[i] == NULL) continue; if (queue->tq_threads[i] == td) { ret = 1; break; } if (++j >= queue->tq_tcount) break; } return (ret); }
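For reference, a minimal sketch (a hypothetical consumer, not part of this patch) of what the subr_taskqueue.c hunk enables: arming a timeout task on a fast, spin-mutex backed taskqueue, which previously tripped the removed "Timeout for spin-queue" assertion. All foo_* names are illustrative.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

static struct taskqueue *foo_tq;	/* hypothetical consumer state */
static struct timeout_task foo_ttask;

static void
foo_task_fn(void *arg, int pending)
{
	/* Runs from the fast (spin-mutex) taskqueue; must not sleep. */
}

static void
foo_init(void *dummy __unused)
{
	/* A private fast taskqueue serviced by one kernel thread. */
	foo_tq = taskqueue_create_fast("foo_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &foo_tq);
	taskqueue_start_threads(&foo_tq, 1, PI_NET, "foo_tq");
	TIMEOUT_TASK_INIT(foo_tq, &foo_ttask, 0, foo_task_fn, NULL);
	/*
	 * Schedule the task one second out.  With this change the
	 * backing callout is reset with C_DIRECT_EXEC instead of
	 * hitting the old "Timeout for spin-queue" assertion.
	 */
	taskqueue_enqueue_timeout(foo_tq, &foo_ttask, hz);
}
SYSINIT(foo_tq_example, SI_SUB_CONFIGURE, SI_ORDER_ANY, foo_init, NULL);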