Index: sys/compat/linuxkpi/common/include/linux/workqueue.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/workqueue.h
+++ sys/compat/linuxkpi/common/include/linux/workqueue.h
@@ -72,6 +72,13 @@
 	atomic_t state;
 };
 
+struct rcu_work {
+	struct work_struct work;
+	struct rcu_head rcu;
+
+	struct workqueue_struct *wq;
+};
+
 #define DECLARE_WORK(name, fn) \
 	struct work_struct name; \
 	static void name##_init(void *arg) \
@@ -112,6 +119,9 @@
 	TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work)); \
 } while (0)
 
+#define INIT_RCU_WORK(_work, _fn) \
+	INIT_WORK(&(_work)->work, (_fn))
+
 #define INIT_WORK_ONSTACK(work, fn) \
 	INIT_WORK(work, fn)
 
@@ -192,6 +202,12 @@
 #define flush_work(work) \
 	linux_flush_work(work)
 
+#define queue_rcu_work(wq, rwork) \
+	linux_queue_rcu_work(wq, rwork)
+
+#define flush_rcu_work(rwork) \
+	linux_flush_rcu_work(rwork)
+
 #define flush_delayed_work(dwork) \
 	linux_flush_delayed_work(dwork)
 
@@ -237,5 +253,7 @@
 extern bool linux_work_pending(struct work_struct *);
 extern bool linux_work_busy(struct work_struct *);
 extern struct work_struct *linux_current_work(void);
+extern bool linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
+extern bool linux_flush_rcu_work(struct rcu_work *rwork);
 
 #endif /* _LINUX_WORKQUEUE_H_ */
Index: sys/compat/linuxkpi/common/src/linux_work.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_work.c
+++ sys/compat/linuxkpi/common/src/linux_work.c
@@ -31,6 +31,7 @@
 #include 
 #include 
 #include 
+#include <linux/rcupdate.h>
 
 #include 
 
@@ -155,6 +156,53 @@
 	}
 }
 
+/*
+ * Callback function for linux_queue_rcu_work(): it runs once the RCU
+ * grace period has elapsed and queues the embedded work item on the
+ * workqueue saved by linux_queue_rcu_work().
+ */
+static void
+rcu_work_func(struct rcu_head *rcu)
+{
+	struct rcu_work *rwork;
+
+	rwork = container_of(rcu, struct rcu_work, rcu);
+	linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
+}
+
+/*
+ * This function queues a work item to run after an RCU grace period.
+ * If the work was already pending it returns false; otherwise it
+ * calls linux_call_rcu() and returns true.
+ */
+bool
+linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
+{
+
+	if (!linux_work_pending(&rwork->work)) {
+		rwork->wq = wq;
+		linux_call_rcu(RCU_TYPE_REGULAR, &rwork->rcu, rcu_work_func);
+		return (true);
+	}
+	return (false);
+}
+
+/*
+ * This function waits for the grace period of a pending rcu_work to
+ * elapse and then flushes the work.
+ * It returns true if the work was pending and we waited for it;
+ * otherwise it returns the result of flushing the work directly.
+ */
+bool
+linux_flush_rcu_work(struct rcu_work *rwork)
+{
+
+	if (linux_work_pending(&rwork->work)) {
+		linux_rcu_barrier(RCU_TYPE_REGULAR);
+		linux_flush_work(&rwork->work);
+		return (true);
+	}
+	return (linux_flush_work(&rwork->work));
+}
+
 /*
  * This function queues the given work structure on the given
  * workqueue after a given delay in ticks. It returns non-zero if the
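
For reviewers, a minimal usage sketch of the API added above. This is hypothetical consumer code, not part of the patch: struct foo, foo_release_work() and foo_schedule_release() are made-up names, while INIT_RCU_WORK(), queue_rcu_work(), flush_rcu_work() and system_wq come from the LinuxKPI workqueue header.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/*
 * Hypothetical example: defer freeing an object until after an RCU
 * grace period by running the free in workqueue context through the
 * rcu_work shim added in this patch.
 */
struct foo {
	struct rcu_work rwork;
	/* ... other driver state ... */
};

static void
foo_release_work(struct work_struct *work)
{
	struct rcu_work *rwork = container_of(work, struct rcu_work, work);
	struct foo *f = container_of(rwork, struct foo, rwork);

	/* Runs on a workqueue thread, after the grace period has elapsed. */
	kfree(f);
}

static void
foo_schedule_release(struct foo *f)
{

	INIT_RCU_WORK(&f->rwork, foo_release_work);
	/* Returns false if the work item was already pending. */
	(void)queue_rcu_work(system_wq, &f->rwork);
}

static void
foo_drain(struct foo *f)
{

	/*
	 * Wait for a pending grace period and for the work itself to
	 * finish before continuing with teardown.
	 */
	(void)flush_rcu_work(&f->rwork);
}

Since the shim follows the Linux rcu_work semantics, drivers ported through LinuxKPI that already use this pattern should not need changes.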