Index: stable/11/sys/compat/linuxkpi/common/include/linux/sched.h =================================================================== --- stable/11/sys/compat/linuxkpi/common/include/linux/sched.h (revision 337896) +++ stable/11/sys/compat/linuxkpi/common/include/linux/sched.h (revision 337897) @@ -1,179 +1,181 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_SCHED_H_ #define _LINUX_SCHED_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_SCHEDULE_TIMEOUT INT_MAX #define TASK_RUNNING 0x0000 #define TASK_INTERRUPTIBLE 0x0001 #define TASK_UNINTERRUPTIBLE 0x0002 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) #define TASK_WAKING 0x0100 #define TASK_PARKED 0x0200 #define TASK_COMM_LEN (MAXCOMLEN + 1) +struct work_struct; struct task_struct { struct thread *task_thread; struct mm_struct *mm; linux_task_fn_t *task_fn; void *task_data; int task_ret; atomic_t usage; atomic_t state; atomic_t kthread_flags; pid_t pid; /* BSD thread ID */ const char *comm; void *bsd_ioctl_data; unsigned bsd_ioctl_len; struct completion parked; struct completion exited; TAILQ_ENTRY(task_struct) rcu_entry; int rcu_recurse; int bsd_interrupt_value; + struct work_struct *work; /* current work struct, if set */ }; #define current ({ \ struct thread *__td = curthread; \ linux_set_current(__td); \ ((struct task_struct *)__td->td_lkpi_task); \ }) #define task_pid_group_leader(task) (task)->task_thread->td_proc->p_pid #define task_pid(task) ((task)->pid) #define task_pid_nr(task) ((task)->pid) #define task_pid_vnr(task) ((task)->pid) #define get_pid(x) (x) #define put_pid(x) do { } while (0) #define current_euid() (curthread->td_ucred->cr_uid) #define set_task_state(task, x) atomic_set(&(task)->state, (x)) #define __set_task_state(task, x) ((task)->state.counter = (x)) #define set_current_state(x) set_task_state(current, x) #define __set_current_state(x) __set_task_state(current, x) static inline void get_task_struct(struct task_struct *task) { atomic_inc(&task->usage); } static inline void put_task_struct(struct 
task_struct *task) { if (atomic_dec_and_test(&task->usage)) linux_free_current(task); } #define cond_resched() do { if (!cold) sched_relinquish(curthread); } while (0) #define yield() kern_yield(PRI_UNCHANGED) #define sched_yield() sched_relinquish(curthread) #define need_resched() (curthread->td_flags & TDF_NEEDRESCHED) bool linux_signal_pending(struct task_struct *task); bool linux_fatal_signal_pending(struct task_struct *task); bool linux_signal_pending_state(long state, struct task_struct *task); void linux_send_sig(int signo, struct task_struct *task); #define signal_pending(task) linux_signal_pending(task) #define fatal_signal_pending(task) linux_fatal_signal_pending(task) #define signal_pending_state(state, task) \ linux_signal_pending_state(state, task) #define send_sig(signo, task, priv) do { \ CTASSERT((priv) == 0); \ linux_send_sig(signo, task); \ } while (0) int linux_schedule_timeout(int timeout); static inline void linux_schedule_save_interrupt_value(struct task_struct *task, int value) { task->bsd_interrupt_value = value; } static inline int linux_schedule_get_interrupt_value(struct task_struct *task) { int value = task->bsd_interrupt_value; task->bsd_interrupt_value = 0; return (value); } #define schedule() \ (void)linux_schedule_timeout(MAX_SCHEDULE_TIMEOUT) #define schedule_timeout(timeout) \ linux_schedule_timeout(timeout) #define schedule_timeout_killable(timeout) \ schedule_timeout_interruptible(timeout) #define schedule_timeout_interruptible(timeout) ({ \ set_current_state(TASK_INTERRUPTIBLE); \ schedule_timeout(timeout); \ }) #define schedule_timeout_uninterruptible(timeout) ({ \ set_current_state(TASK_UNINTERRUPTIBLE); \ schedule_timeout(timeout); \ }) #define io_schedule() schedule() #define io_schedule_timeout(timeout) schedule_timeout(timeout) static inline uint64_t local_clock(void) { struct timespec ts; nanotime(&ts); return ((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec); } #endif /* _LINUX_SCHED_H_ */ Index: stable/11/sys/compat/linuxkpi/common/include/linux/workqueue.h =================================================================== --- stable/11/sys/compat/linuxkpi/common/include/linux/workqueue.h (revision 337896) +++ stable/11/sys/compat/linuxkpi/common/include/linux/workqueue.h (revision 337897) @@ -1,236 +1,240 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_WORKQUEUE_H_ #define _LINUX_WORKQUEUE_H_ #include #include #include #include #include #include #include #include #include #define WORK_CPU_UNBOUND MAXCPU #define WQ_UNBOUND (1 << 0) #define WQ_HIGHPRI (1 << 1) struct work_struct; typedef void (*work_func_t)(struct work_struct *); struct work_exec { TAILQ_ENTRY(work_exec) entry; struct work_struct *target; }; struct workqueue_struct { struct taskqueue *taskqueue; struct mtx exec_mtx; TAILQ_HEAD(, work_exec) exec_head; atomic_t draining; }; #define WQ_EXEC_LOCK(wq) mtx_lock(&(wq)->exec_mtx) #define WQ_EXEC_UNLOCK(wq) mtx_unlock(&(wq)->exec_mtx) struct work_struct { struct task work_task; struct workqueue_struct *work_queue; work_func_t func; atomic_t state; }; #define DECLARE_WORK(name, fn) \ struct work_struct name; \ static void name##_init(void *arg) \ { \ INIT_WORK(&name, fn); \ } \ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL) struct delayed_work { struct work_struct work; struct { struct callout callout; struct mtx mtx; int expires; } timer; }; #define DECLARE_DELAYED_WORK(name, fn) \ struct delayed_work name; \ static void name##_init(void *arg) \ { \ linux_init_delayed_work(&name, fn); \ } \ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL) static inline struct delayed_work * to_delayed_work(struct work_struct *work) { return (container_of(work, struct delayed_work, work)); } #define INIT_WORK(work, fn) \ do { \ (work)->func = (fn); \ (work)->work_queue = NULL; \ atomic_set(&(work)->state, 0); \ TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work)); \ } while (0) #define INIT_WORK_ONSTACK(work, fn) \ INIT_WORK(work, fn) #define INIT_DELAYED_WORK(dwork, fn) \ linux_init_delayed_work(dwork, fn) #define INIT_DELAYED_WORK_ONSTACK(dwork, fn) \ linux_init_delayed_work(dwork, fn) #define INIT_DEFERRABLE_WORK(dwork, fn) \ INIT_DELAYED_WORK(dwork, fn) #define flush_scheduled_work() \ taskqueue_drain_all(system_wq->taskqueue) #define queue_work(wq, work) \ linux_queue_work_on(WORK_CPU_UNBOUND, wq, work) #define schedule_work(work) \ linux_queue_work_on(WORK_CPU_UNBOUND, system_wq, work) #define queue_delayed_work(wq, dwork, delay) \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay) #define schedule_delayed_work_on(cpu, dwork, delay) \ linux_queue_delayed_work_on(cpu, system_wq, dwork, delay) #define queue_work_on(cpu, wq, work) \ linux_queue_work_on(cpu, wq, work) #define schedule_delayed_work(dwork, delay) \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork, delay) #define queue_delayed_work_on(cpu, wq, dwork, delay) \ linux_queue_delayed_work_on(cpu, wq, dwork, delay) #define create_singlethread_workqueue(name) \ linux_create_workqueue_common(name, 1) #define create_workqueue(name) \ linux_create_workqueue_common(name, mp_ncpus) #define alloc_ordered_workqueue(name, flags) \ linux_create_workqueue_common(name, 1) #define alloc_workqueue(name, flags, max_active) \ linux_create_workqueue_common(name, max_active) #define flush_workqueue(wq) \ 
taskqueue_drain_all((wq)->taskqueue) #define drain_workqueue(wq) do { \ atomic_inc(&(wq)->draining); \ taskqueue_drain_all((wq)->taskqueue); \ atomic_dec(&(wq)->draining); \ } while (0) #define mod_delayed_work(wq, dwork, delay) ({ \ bool __retval; \ __retval = linux_cancel_delayed_work(dwork); \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, \ wq, dwork, delay); \ __retval; \ }) #define delayed_work_pending(dwork) \ linux_work_pending(&(dwork)->work) #define cancel_delayed_work(dwork) \ linux_cancel_delayed_work(dwork) #define cancel_work_sync(work) \ linux_cancel_work_sync(work) #define cancel_delayed_work_sync(dwork) \ linux_cancel_delayed_work_sync(dwork) #define flush_work(work) \ linux_flush_work(work) #define flush_delayed_work(dwork) \ linux_flush_delayed_work(dwork) #define work_pending(work) \ linux_work_pending(work) #define work_busy(work) \ linux_work_busy(work) #define destroy_work_on_stack(work) \ do { } while (0) #define destroy_delayed_work_on_stack(dwork) \ do { } while (0) #define destroy_workqueue(wq) \ linux_destroy_workqueue(wq) +#define current_work() \ + linux_current_work() + /* prototypes */ extern struct workqueue_struct *system_wq; extern struct workqueue_struct *system_long_wq; extern struct workqueue_struct *system_unbound_wq; extern struct workqueue_struct *system_highpri_wq; extern struct workqueue_struct *system_power_efficient_wq; extern void linux_init_delayed_work(struct delayed_work *, work_func_t); extern void linux_work_fn(void *, int); extern void linux_delayed_work_fn(void *, int); extern struct workqueue_struct *linux_create_workqueue_common(const char *, int); extern void linux_destroy_workqueue(struct workqueue_struct *); extern bool linux_queue_work_on(int cpu, struct workqueue_struct *, struct work_struct *); extern bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *, struct delayed_work *, unsigned delay); extern bool linux_cancel_delayed_work(struct delayed_work *); extern bool linux_cancel_work_sync(struct work_struct *); extern bool linux_cancel_delayed_work_sync(struct delayed_work *); extern bool linux_flush_work(struct work_struct *); extern bool linux_flush_delayed_work(struct delayed_work *); extern bool linux_work_pending(struct work_struct *); extern bool linux_work_busy(struct work_struct *); +extern struct work_struct *linux_current_work(void); #endif /* _LINUX_WORKQUEUE_H_ */ Index: stable/11/sys/compat/linuxkpi/common/src/linux_work.c =================================================================== --- stable/11/sys/compat/linuxkpi/common/src/linux_work.c (revision 337896) +++ stable/11/sys/compat/linuxkpi/common/src/linux_work.c (revision 337897) @@ -1,619 +1,632 @@ /*- * Copyright (c) 2017 Hans Petter Selasky * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include /* * Define all work struct states */ enum { WORK_ST_IDLE, /* idle - not started */ WORK_ST_TIMER, /* timer is being started */ WORK_ST_TASK, /* taskqueue is being queued */ WORK_ST_EXEC, /* callback is being called */ WORK_ST_CANCEL, /* cancel is being requested */ WORK_ST_MAX, }; /* * Define global workqueues */ static struct workqueue_struct *linux_system_short_wq; static struct workqueue_struct *linux_system_long_wq; struct workqueue_struct *system_wq; struct workqueue_struct *system_long_wq; struct workqueue_struct *system_unbound_wq; struct workqueue_struct *system_highpri_wq; struct workqueue_struct *system_power_efficient_wq; static int linux_default_wq_cpus = 4; static void linux_delayed_work_timer_fn(void *); /* * This function atomically updates the work state and returns the * previous state at the time of update. */ static uint8_t linux_update_state(atomic_t *v, const uint8_t *pstate) { int c, old; c = v->counter; while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) c = old; return (c); } /* * A LinuxKPI task is allowed to free itself inside the callback function * and cannot safely be referred after the callback function has * completed. This function gives the linux_work_fn() function a hint, * that the task is not going away and can have its state checked * again. Without this extra hint LinuxKPI tasks cannot be serialized * accross multiple worker threads. */ static bool linux_work_exec_unblock(struct work_struct *work) { struct workqueue_struct *wq; struct work_exec *exec; bool retval = 0; wq = work->work_queue; if (unlikely(wq == NULL)) goto done; WQ_EXEC_LOCK(wq); TAILQ_FOREACH(exec, &wq->exec_head, entry) { if (exec->target == work) { exec->target = NULL; retval = 1; break; } } WQ_EXEC_UNLOCK(wq); done: return (retval); } static void linux_delayed_work_enqueue(struct delayed_work *dwork) { struct taskqueue *tq; tq = dwork->work.work_queue->taskqueue; taskqueue_enqueue(tq, &dwork->work.work_task); } /* * This function queues the given work structure on the given * workqueue. It returns non-zero if the work was successfully * [re-]queued. Else the work is already pending for completion. 
*/ bool linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq, struct work_struct *work) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_TASK, /* start queuing task */ [WORK_ST_TIMER] = WORK_ST_TIMER, /* NOP */ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */ [WORK_ST_EXEC] = WORK_ST_TASK, /* queue task another time */ [WORK_ST_CANCEL] = WORK_ST_TASK, /* start queuing task again */ }; if (atomic_read(&wq->draining) != 0) return (!work_pending(work)); switch (linux_update_state(&work->state, states)) { case WORK_ST_EXEC: case WORK_ST_CANCEL: if (linux_work_exec_unblock(work) != 0) return (1); /* FALLTHROUGH */ case WORK_ST_IDLE: work->work_queue = wq; taskqueue_enqueue(wq->taskqueue, &work->work_task); return (1); default: return (0); /* already on a queue */ } } /* * This function queues the given work structure on the given * workqueue after a given delay in ticks. It returns non-zero if the * work was successfully [re-]queued. Else the work is already pending * for completion. */ bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned delay) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_TIMER, /* start timeout */ [WORK_ST_TIMER] = WORK_ST_TIMER, /* NOP */ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */ [WORK_ST_EXEC] = WORK_ST_TIMER, /* start timeout */ [WORK_ST_CANCEL] = WORK_ST_TIMER, /* start timeout */ }; if (atomic_read(&wq->draining) != 0) return (!work_pending(&dwork->work)); switch (linux_update_state(&dwork->work.state, states)) { case WORK_ST_EXEC: case WORK_ST_CANCEL: if (delay == 0 && linux_work_exec_unblock(&dwork->work) != 0) { dwork->timer.expires = jiffies; return (1); } /* FALLTHROUGH */ case WORK_ST_IDLE: dwork->work.work_queue = wq; dwork->timer.expires = jiffies + delay; if (delay == 0) { linux_delayed_work_enqueue(dwork); } else if (unlikely(cpu != WORK_CPU_UNBOUND)) { mtx_lock(&dwork->timer.mtx); callout_reset_on(&dwork->timer.callout, delay, &linux_delayed_work_timer_fn, dwork, cpu); mtx_unlock(&dwork->timer.mtx); } else { mtx_lock(&dwork->timer.mtx); callout_reset(&dwork->timer.callout, delay, &linux_delayed_work_timer_fn, dwork); mtx_unlock(&dwork->timer.mtx); } return (1); default: return (0); /* already on a queue */ } } void linux_work_fn(void *context, int pending) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_EXEC, /* delayed work w/o timeout */ [WORK_ST_TASK] = WORK_ST_EXEC, /* call callback */ [WORK_ST_EXEC] = WORK_ST_IDLE, /* complete callback */ [WORK_ST_CANCEL] = WORK_ST_EXEC, /* failed to cancel */ }; struct work_struct *work; struct workqueue_struct *wq; struct work_exec exec; + struct task_struct *task; - linux_set_current(curthread); + task = current; /* setup local variables */ work = context; wq = work->work_queue; /* store target pointer */ exec.target = work; /* insert executor into list */ WQ_EXEC_LOCK(wq); TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry); while (1) { switch (linux_update_state(&work->state, states)) { case WORK_ST_TIMER: case WORK_ST_TASK: case WORK_ST_CANCEL: WQ_EXEC_UNLOCK(wq); + /* set current work structure */ + task->work = work; + /* call work function */ work->func(work); + /* set current work structure */ + task->work = NULL; + WQ_EXEC_LOCK(wq); /* check if unblocked */ if (exec.target != work) { /* reapply block */ exec.target = work; break; } /* FALLTHROUGH */ default: goto done; } } done: /* remove executor from 
list */ TAILQ_REMOVE(&wq->exec_head, &exec, entry); WQ_EXEC_UNLOCK(wq); } void linux_delayed_work_fn(void *context, int pending) { struct delayed_work *dwork = context; /* * Make sure the timer belonging to the delayed work gets * drained before invoking the work function. Else the timer * mutex may still be in use which can lead to use-after-free * situations, because the work function might free the work * structure before returning. */ callout_drain(&dwork->timer.callout); linux_work_fn(&dwork->work, pending); } static void linux_delayed_work_timer_fn(void *arg) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_TASK, /* start queueing task */ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */ [WORK_ST_CANCEL] = WORK_ST_TASK, /* failed to cancel */ }; struct delayed_work *dwork = arg; switch (linux_update_state(&dwork->work.state, states)) { case WORK_ST_TIMER: case WORK_ST_CANCEL: linux_delayed_work_enqueue(dwork); break; default: break; } } /* * This function cancels the given work structure in a synchronous * fashion. It returns non-zero if the work was successfully * cancelled. Else the work was already cancelled. */ bool linux_cancel_work_sync(struct work_struct *work) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_TIMER, /* can't happen */ [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel and drain */ [WORK_ST_EXEC] = WORK_ST_IDLE, /* too late, drain */ [WORK_ST_CANCEL] = WORK_ST_IDLE, /* cancel and drain */ }; struct taskqueue *tq; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "linux_cancel_work_sync() might sleep"); switch (linux_update_state(&work->state, states)) { case WORK_ST_IDLE: case WORK_ST_TIMER: return (0); case WORK_ST_EXEC: tq = work->work_queue->taskqueue; if (taskqueue_cancel(tq, &work->work_task, NULL) != 0) taskqueue_drain(tq, &work->work_task); return (0); default: tq = work->work_queue->taskqueue; if (taskqueue_cancel(tq, &work->work_task, NULL) != 0) taskqueue_drain(tq, &work->work_task); return (1); } } /* * This function atomically stops the timer and callback. The timer * callback will not be called after this function returns. This * functions returns true when the timeout was cancelled. Else the * timeout was not started or has already been called. */ static inline bool linux_cancel_timer(struct delayed_work *dwork, bool drain) { bool cancelled; mtx_lock(&dwork->timer.mtx); cancelled = (callout_stop(&dwork->timer.callout) == 1); mtx_unlock(&dwork->timer.mtx); /* check if we should drain */ if (drain) callout_drain(&dwork->timer.callout); return (cancelled); } /* * This function cancels the given delayed work structure in a * non-blocking fashion. It returns non-zero if the work was * successfully cancelled. Else the work may still be busy or already * cancelled. 
*/ bool linux_cancel_delayed_work(struct delayed_work *dwork) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_CANCEL, /* try to cancel */ [WORK_ST_TASK] = WORK_ST_CANCEL, /* try to cancel */ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */ [WORK_ST_CANCEL] = WORK_ST_CANCEL, /* NOP */ }; struct taskqueue *tq; switch (linux_update_state(&dwork->work.state, states)) { case WORK_ST_TIMER: case WORK_ST_CANCEL: if (linux_cancel_timer(dwork, 0)) { atomic_cmpxchg(&dwork->work.state, WORK_ST_CANCEL, WORK_ST_IDLE); return (1); } /* FALLTHROUGH */ case WORK_ST_TASK: tq = dwork->work.work_queue->taskqueue; if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) { atomic_cmpxchg(&dwork->work.state, WORK_ST_CANCEL, WORK_ST_IDLE); return (1); } /* FALLTHROUGH */ default: return (0); } } /* * This function cancels the given work structure in a synchronous * fashion. It returns non-zero if the work was successfully * cancelled. Else the work was already cancelled. */ bool linux_cancel_delayed_work_sync(struct delayed_work *dwork) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_IDLE, /* cancel and drain */ [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel and drain */ [WORK_ST_EXEC] = WORK_ST_IDLE, /* too late, drain */ [WORK_ST_CANCEL] = WORK_ST_IDLE, /* cancel and drain */ }; struct taskqueue *tq; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "linux_cancel_delayed_work_sync() might sleep"); switch (linux_update_state(&dwork->work.state, states)) { case WORK_ST_IDLE: return (0); case WORK_ST_EXEC: tq = dwork->work.work_queue->taskqueue; if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) != 0) taskqueue_drain(tq, &dwork->work.work_task); return (0); case WORK_ST_TIMER: case WORK_ST_CANCEL: if (linux_cancel_timer(dwork, 1)) { /* * Make sure taskqueue is also drained before * returning: */ tq = dwork->work.work_queue->taskqueue; taskqueue_drain(tq, &dwork->work.work_task); return (1); } /* FALLTHROUGH */ default: tq = dwork->work.work_queue->taskqueue; if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) != 0) taskqueue_drain(tq, &dwork->work.work_task); return (1); } } /* * This function waits until the given work structure is completed. * It returns non-zero if the work was successfully * waited for. Else the work was not waited for. */ bool linux_flush_work(struct work_struct *work) { struct taskqueue *tq; int retval; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "linux_flush_work() might sleep"); switch (atomic_read(&work->state)) { case WORK_ST_IDLE: return (0); default: tq = work->work_queue->taskqueue; retval = taskqueue_poll_is_busy(tq, &work->work_task); taskqueue_drain(tq, &work->work_task); return (retval); } } /* * This function waits until the given delayed work structure is * completed. It returns non-zero if the work was successfully waited * for. Else the work was not waited for. 
*/ bool linux_flush_delayed_work(struct delayed_work *dwork) { struct taskqueue *tq; int retval; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "linux_flush_delayed_work() might sleep"); switch (atomic_read(&dwork->work.state)) { case WORK_ST_IDLE: return (0); case WORK_ST_TIMER: if (linux_cancel_timer(dwork, 1)) linux_delayed_work_enqueue(dwork); /* FALLTHROUGH */ default: tq = dwork->work.work_queue->taskqueue; retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task); taskqueue_drain(tq, &dwork->work.work_task); return (retval); } } /* * This function returns true if the given work is pending, and not * yet executing: */ bool linux_work_pending(struct work_struct *work) { switch (atomic_read(&work->state)) { case WORK_ST_TIMER: case WORK_ST_TASK: case WORK_ST_CANCEL: return (1); default: return (0); } } /* * This function returns true if the given work is busy. */ bool linux_work_busy(struct work_struct *work) { struct taskqueue *tq; switch (atomic_read(&work->state)) { case WORK_ST_IDLE: return (0); case WORK_ST_EXEC: tq = work->work_queue->taskqueue; return (taskqueue_poll_is_busy(tq, &work->work_task)); default: return (1); } } struct workqueue_struct * linux_create_workqueue_common(const char *name, int cpus) { struct workqueue_struct *wq; /* * If zero CPUs are specified use the default number of CPUs: */ if (cpus == 0) cpus = linux_default_wq_cpus; wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO); wq->taskqueue = taskqueue_create(name, M_WAITOK, taskqueue_thread_enqueue, &wq->taskqueue); atomic_set(&wq->draining, 0); taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name); TAILQ_INIT(&wq->exec_head); mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF); return (wq); } void linux_destroy_workqueue(struct workqueue_struct *wq) { atomic_inc(&wq->draining); drain_workqueue(wq); taskqueue_free(wq->taskqueue); mtx_destroy(&wq->exec_mtx); kfree(wq); } void linux_init_delayed_work(struct delayed_work *dwork, work_func_t func) { memset(dwork, 0, sizeof(*dwork)); dwork->work.func = func; TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork); mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL, MTX_DEF | MTX_NOWITNESS); callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0); +} + +struct work_struct * +linux_current_work(void) +{ + return (current->work); } static void linux_work_init(void *arg) { int max_wq_cpus = mp_ncpus + 1; /* avoid deadlock when there are too few threads */ if (max_wq_cpus < 4) max_wq_cpus = 4; /* set default number of CPUs */ linux_default_wq_cpus = max_wq_cpus; linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus); linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus); /* populate the workqueue pointers */ system_long_wq = linux_system_long_wq; system_wq = linux_system_short_wq; system_power_efficient_wq = linux_system_short_wq; system_unbound_wq = linux_system_short_wq; system_highpri_wq = linux_system_short_wq; } SYSINIT(linux_work_init, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_init, NULL); static void linux_work_uninit(void *arg) { destroy_workqueue(linux_system_short_wq); destroy_workqueue(linux_system_long_wq); /* clear workqueue pointers */ system_long_wq = NULL; system_wq = NULL; system_power_efficient_wq = NULL; system_unbound_wq = NULL; system_highpri_wq = NULL; } SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL); Index: stable/11 =================================================================== --- stable/11 (revision 337896) +++ 
stable/11 (revision 337897)

Property changes on: stable/11
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r337376
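For reference, below is a minimal usage sketch of the current_work() accessor added by this change. The driver names (example_softc, example_reset_handler, example_attach) are hypothetical and only illustrate the pattern; the sketch assumes the LinuxKPI headers modified in this diff and the standard LinuxKPI container_of() from linux/kernel.h.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_softc {
	struct work_struct reset_work;	/* hypothetical driver state */
};

static void
example_reset_handler(struct work_struct *work)
{
	struct example_softc *sc =
	    container_of(work, struct example_softc, reset_work);

	/*
	 * current_work() maps to linux_current_work(), which returns the
	 * work_struct pointer that linux_work_fn() stores in the current
	 * task_struct for the duration of the callback, or NULL when the
	 * caller is not running from a workqueue worker.
	 */
	if (current_work() == &sc->reset_work) {
		/* running on the workqueue; do work-context-only handling */
	}
}

static void
example_attach(struct example_softc *sc)
{
	INIT_WORK(&sc->reset_work, example_reset_handler);
	schedule_work(&sc->reset_work);
}

This mirrors the Linux kernel's current_work() helper, which callers typically use to decide whether they are already executing on a workqueue before flushing or cancelling related work items.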