Page Menu
Home
FreeBSD
Search
Configure Global Search
Log In
Files
F152474617
D56308.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
7 KB
Referenced Files
None
Subscribers
None
D56308.diff
View Options
diff --git a/sys/compat/linuxkpi/common/include/linux/workqueue.h b/sys/compat/linuxkpi/common/include/linux/workqueue.h
--- a/sys/compat/linuxkpi/common/include/linux/workqueue.h
+++ b/sys/compat/linuxkpi/common/include/linux/workqueue.h
@@ -177,13 +177,8 @@
atomic_dec(&(wq)->draining); \
} while (0)
-#define mod_delayed_work(wq, dwork, delay) ({ \
- bool __retval; \
- __retval = linux_cancel_delayed_work(dwork); \
- linux_queue_delayed_work_on(WORK_CPU_UNBOUND, \
- wq, dwork, delay); \
- __retval; \
-})
+#define mod_delayed_work(wq, dwork, delay) \
+ linux_mod_delayed_work(wq, dwork, delay)
#define delayed_work_pending(dwork) \
linux_work_pending(&(dwork)->work)
@@ -250,6 +245,8 @@
extern bool linux_cancel_delayed_work(struct delayed_work *);
extern bool linux_cancel_work_sync(struct work_struct *);
extern bool linux_cancel_delayed_work_sync(struct delayed_work *);
+extern bool linux_mod_delayed_work(struct workqueue_struct *,
+ struct delayed_work *, unsigned long);
extern bool linux_flush_work(struct work_struct *);
extern bool linux_flush_delayed_work(struct delayed_work *);
extern bool linux_work_pending(struct work_struct *);
diff --git a/sys/compat/linuxkpi/common/src/linux_work.c b/sys/compat/linuxkpi/common/src/linux_work.c
--- a/sys/compat/linuxkpi/common/src/linux_work.c
+++ b/sys/compat/linuxkpi/common/src/linux_work.c
@@ -33,6 +33,7 @@
#include <linux/irq_work.h>
#include <sys/kernel.h>
+#include <sys/sdt.h>
/*
* Define all work struct states
@@ -64,6 +65,12 @@
static void linux_delayed_work_timer_fn(void *);
+static uint8_t
+linux_fetch_state(atomic_t *v)
+{
+ return (atomic_read(v));
+}
+
/*
* This function atomically updates the work state and returns the
* previous state at the time of update.
@@ -210,8 +217,8 @@
* work was successfully [re-]queued. Else the work is already pending
* for completion.
*/
-bool
-linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+static bool
+linux_queue_delayed_work_on_locked(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
@@ -223,6 +230,7 @@
};
bool res;
+ mtx_assert(&dwork->timer.mtx, MA_OWNED);
if (atomic_read(&wq->draining) != 0)
return (!work_pending(&dwork->work));
@@ -233,14 +241,13 @@
if (delay > INT_MAX)
delay = INT_MAX;
- mtx_lock(&dwork->timer.mtx);
switch (linux_update_state(&dwork->work.state, states)) {
case WORK_ST_EXEC:
case WORK_ST_CANCEL:
if (delay == 0 && linux_work_exec_unblock(&dwork->work)) {
dwork->timer.expires = jiffies;
res = true;
- goto out;
+ break;
}
/* FALLTHROUGH */
case WORK_ST_IDLE:
@@ -262,11 +269,22 @@
res = false;
break;
}
-out:
- mtx_unlock(&dwork->timer.mtx);
+
return (res);
}
+bool
+linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ struct delayed_work *dwork, unsigned long delay)
+{
+ bool ret;
+
+ mtx_lock(&dwork->timer.mtx);
+ ret = linux_queue_delayed_work_on_locked(cpu, wq, dwork, delay);
+ mtx_unlock(&dwork->timer.mtx);
+ return (ret);
+}
+
void
linux_work_fn(void *context, int pending)
{
@@ -505,6 +523,147 @@
}
}
+SDT_PROBE_DEFINE2(sdt, , linux_mod_delayed_work, busy__retry,
+ "struct workqueue_struct *", "struct delayed_work *");
+SDT_PROBE_DEFINE2(sdt, , linux_mod_delayed_work, busy__forward,
+ "struct workqueue_struct *", "struct delayed_work *");
+
+/*
+ * This function resets the timer on a pending work structure if it is currently
+ * scheduled but not running, or schedules the work structure if it is running or
+ * idle.
+ */
+bool
+linux_mod_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *dwork, unsigned long delay)
+{
+ static const uint8_t states[WORK_ST_MAX] = {
+ [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
+ [WORK_ST_TIMER] = WORK_ST_CANCEL, /* try to cancel */
+ [WORK_ST_TASK] = WORK_ST_CANCEL, /* try to cancel */
+ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */
+ [WORK_ST_CANCEL] = WORK_ST_CANCEL, /* NOP */
+ };
+ struct taskqueue *tq;
+ int error;
+ bool cancelled, running;
+
+ mtx_lock(&dwork->timer.mtx);
+retry:
+ /*
+ * These are all reset every time because the return value shouldn't
+ * use any state from the time we were called. If the task was running
+ * and we had to wait for it to stop, then we may very well still have
+ * to cancel it if we lost some race.
+ */
+ cancelled = running = false;
+ switch (linux_update_state(&dwork->work.state, states)) {
+ case WORK_ST_TIMER:
+ case WORK_ST_CANCEL:
+ error = callout_stop(&dwork->timer.callout);
+ cancelled = (error == 1);
+ if (cancelled) {
+			/* Cancelled */
+ atomic_cmpxchg(&dwork->work.state,
+ WORK_ST_CANCEL, WORK_ST_IDLE);
+ break;
+ } else if (error == 0) {
+ /*
+ * Running, too late to cancel. Even if we managed to
+ * transition the state, it's likely waiting to pick
+ * up the lock and transition it back.
+ */
+ running = true;
+ break;
+ }
+
+ /*
+ * The callout is not running and not scheduled, so we may have
+ * a chance to cancel the taskqueue still.
+ */
+
+ /* FALLTHROUGH */
+ case WORK_ST_TASK:
+ tq = dwork->work.work_queue->taskqueue;
+ if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
+ cancelled = true;
+ atomic_cmpxchg(&dwork->work.state,
+ WORK_ST_CANCEL, WORK_ST_IDLE);
+ break;
+ }
+
+ running = true;
+ break;
+ default:
+ break;
+ }
+
+ /* We'll pick the lock back up before we leave the loop. */
+ if (running)
+ mtx_unlock(&dwork->timer.mtx);
+ while (running) {
+ uint8_t state;
+
+ /*
+ * Busy loop until the task goes to the idle state.
+ * We consider CANCEL to be 'running' since we just transitioned
+ * it right before this loop, and the taskqueue/callout is
+ * expected to correct it.
+ */
+ maybe_yield();
+ state = linux_fetch_state(&dwork->work.state);
+ switch (state) {
+ case WORK_ST_CANCEL:
+ case WORK_ST_TASK:
+ case WORK_ST_TIMER:
+ running = true;
+ break;
+ default:
+ running = false;
+
+ /*
+ * Confirm that we're still in a state where it can be
+ * scheduled, now that we picked the lock back up.
+ */
+ mtx_lock(&dwork->timer.mtx);
+ state = linux_fetch_state(&dwork->work.state);
+ switch (state) {
+ case WORK_ST_EXEC:
+ case WORK_ST_IDLE:
+ SDT_PROBE2(sdt, , linux_mod_delayed_work,
+ busy__forward, wq, dwork);
+ break;
+ default:
+ SDT_PROBE2(sdt, , linux_mod_delayed_work,
+ busy__retry, wq, dwork);
+ goto retry;
+ }
+
+ break;
+ }
+ }
+
+ /*
+ * There's no reason rescheduling it should fail at this point, we
+ * waited for it to become idle and kept it locked after observing
+ * that state.
+ */
+ running = linux_queue_delayed_work_on_locked(WORK_CPU_UNBOUND, wq,
+ dwork, delay);
+ MPASS(running);
+ mtx_unlock(&dwork->timer.mtx);
+
+ /*
+ * The caller's expectation is that we return true if the task was
+ * scheduled by some other invocation. If we had to cancel it, then
+ * we've effectively just adjusted the delay and some other thread is
+ * responsible for the task (considering the case where the caller may
+ * have taken a refcount and expects to release the refcount if some
+ * other thread had already done this).
+ */
+ return (cancelled);
+}
+
/*
* This function cancels the given work structure in a synchronous
* fashion. It returns true if the work was successfully
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Thu, Apr 16, 4:44 AM (3 h, 52 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
31579441
Default Alt Text
D56308.diff (7 KB)
Attached To
Mode
D56308: linuxkpi: fix mod_delayed_work() with running tasks
Attached
Detach File
Event Timeline
Log In to Comment