D10986: Reimplement the wait_queue and schedule() APIs.
D10986.id29394.diff (24 KB)
Index: head/sys/compat/linuxkpi/common/include/linux/sched.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/sched.h
+++ head/sys/compat/linuxkpi/common/include/linux/sched.h
@@ -48,14 +48,13 @@
#include <asm/atomic.h>
-#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+#define MAX_SCHEDULE_TIMEOUT INT_MAX
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-#define TASK_DEAD 64
-#define TASK_WAKEKILL 128
-#define TASK_WAKING 256
+#define TASK_RUNNING 0x0000
+#define TASK_INTERRUPTIBLE 0x0001
+#define TASK_UNINTERRUPTIBLE 0x0002
+#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_WAKING 0x0100
struct task_struct {
struct thread *task_thread;
@@ -89,9 +88,11 @@
#define put_pid(x) do { } while (0)
#define current_euid() (curthread->td_ucred->cr_uid)
-#define set_current_state(x) \
- atomic_store_rel_int((volatile int *)&current->state, (x))
-#define __set_current_state(x) current->state = (x)
+#define set_task_state(task, x) \
+ atomic_store_rel_int((volatile int *)&task->state, (x))
+#define __set_task_state(task, x) (task->state = (x))
+#define set_current_state(x) set_task_state(current, x)
+#define __set_current_state(x) __set_task_state(current, x)
static inline void
get_task_struct(struct task_struct *task)
@@ -106,53 +107,45 @@
linux_free_current(task);
}
-#define schedule() \
-do { \
- void *c; \
- \
- if (cold || SCHEDULER_STOPPED()) \
- break; \
- c = curthread; \
- sleepq_lock(c); \
- if (current->state == TASK_INTERRUPTIBLE || \
- current->state == TASK_UNINTERRUPTIBLE) { \
- sleepq_add(c, NULL, "task", SLEEPQ_SLEEP, 0); \
- sleepq_wait(c, 0); \
- } else { \
- sleepq_release(c); \
- sched_relinquish(curthread); \
- } \
-} while (0)
-
-#define wake_up_process(x) \
-do { \
- int wakeup_swapper; \
- void *c; \
- \
- c = (x)->task_thread; \
- sleepq_lock(c); \
- (x)->state = TASK_RUNNING; \
- wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); \
- sleepq_release(c); \
- if (wakeup_swapper) \
- kick_proc0(); \
-} while (0)
-
#define cond_resched() if (!cold) sched_relinquish(curthread)
+#define yield() kern_yield(PRI_UNCHANGED)
#define sched_yield() sched_relinquish(curthread)
-static inline long
-schedule_timeout(signed long timeout)
-{
- if (timeout < 0)
- return 0;
+#define need_resched() (curthread->td_flags & TDF_NEEDRESCHED)
- pause("lstim", timeout);
+bool linux_signal_pending(struct task_struct *task);
+bool linux_fatal_signal_pending(struct task_struct *task);
+bool linux_signal_pending_state(long state, struct task_struct *task);
+void linux_send_sig(int signo, struct task_struct *task);
- return 0;
-}
+#define signal_pending(task) linux_signal_pending(task)
+#define fatal_signal_pending(task) linux_fatal_signal_pending(task)
+#define signal_pending_state(state, task) \
+ linux_signal_pending_state(state, task)
+#define send_sig(signo, task, priv) do { \
+ CTASSERT(priv == 0); \
+ linux_send_sig(signo, task); \
+} while (0)
-#define need_resched() (curthread->td_flags & TDF_NEEDRESCHED)
+int linux_schedule_timeout(int timeout);
+
+#define schedule() \
+ (void)linux_schedule_timeout(MAX_SCHEDULE_TIMEOUT)
+#define schedule_timeout(timeout) \
+ linux_schedule_timeout(timeout)
+#define schedule_timeout_killable(timeout) \
+ schedule_timeout_uninterruptible(timeout)
+#define schedule_timeout_interruptible(timeout) ({ \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ schedule_timeout(timeout); \
+})
+#define schedule_timeout_uninterruptible(timeout) ({ \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout(timeout); \
+})
+
+#define io_schedule() schedule()
+#define io_schedule_timeout(timeout) schedule_timeout(timeout)
#endif /* _LINUX_SCHED_H_ */
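
Illustrative usage, not part of the diff: a minimal sketch of the Linux sleep idiom the reworked sched.h supports, assuming an invented flag example_event and helper example_wait_for_flag; everything else is the API declared above.

static int
example_wait_for_flag(volatile int *example_event)
{
        int remaining = 100;                    /* timeout in ticks (jiffies) */

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (*example_event != 0)
                        break;                  /* condition already true */
                if (remaining == 0) {
                        __set_current_state(TASK_RUNNING);
                        return (-ETIMEDOUT);    /* timed out */
                }
                remaining = schedule_timeout(remaining);
                if (signal_pending(current)) {
                        __set_current_state(TASK_RUNNING);
                        return (-ERESTARTSYS);  /* interrupted by a signal */
                }
        }
        __set_current_state(TASK_RUNNING);
        return (0);
}

A waker sets the flag and calls wake_up_process() on the sleeping task; linux_schedule_timeout() (added in linux_schedule.c below) checks for the TASK_WAKING handshake under the sleepqueue lock, so a wakeup landing between set_current_state() and schedule_timeout() is not lost.
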
Index: head/sys/compat/linuxkpi/common/include/linux/wait.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/wait.h
+++ head/sys/compat/linuxkpi/common/include/linux/wait.h
@@ -3,6 +3,7 @@
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
* Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,160 +29,243 @@
*
* $FreeBSD$
*/
-#ifndef _LINUX_WAIT_H_
+
+#ifndef _LINUX_WAIT_H_
#define _LINUX_WAIT_H_
#include <linux/compiler.h>
#include <linux/list.h>
-#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/sleepqueue.h>
-#include <sys/kernel.h>
-#include <sys/proc.h>
-typedef struct {
-} wait_queue_t;
+#define SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)
-typedef struct {
- unsigned int wchan;
-} wait_queue_head_t;
+#define might_sleep() \
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")
-#define init_waitqueue_head(x) \
- do { } while (0)
+struct wait_queue;
+struct wait_queue_head;
-static inline void
-__wake_up(wait_queue_head_t *q, int all)
-{
- int wakeup_swapper;
- void *c;
+typedef struct wait_queue wait_queue_t;
+typedef struct wait_queue_head wait_queue_head_t;
- c = &q->wchan;
- sleepq_lock(c);
- if (all)
- wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
- else
- wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
- sleepq_release(c);
- if (wakeup_swapper)
- kick_proc0();
-}
+typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);
-#define wake_up(q) __wake_up(q, 0)
-#define wake_up_nr(q, nr) __wake_up(q, 1)
-#define wake_up_all(q) __wake_up(q, 1)
-#define wake_up_interruptible(q) __wake_up(q, 0)
-#define wake_up_interruptible_nr(q, nr) __wake_up(q, 1)
-#define wake_up_interruptible_all(q, nr) __wake_up(q, 1)
+/*
+ * Many API consumers directly reference these fields and those of
+ * wait_queue_head.
+ */
+struct wait_queue {
+ unsigned int flags; /* always 0 */
+ void *private;
+ wait_queue_func_t *func;
+ struct list_head task_list;
+};
-#define wait_event(q, cond) \
-do { \
- void *c = &(q).wchan; \
- if (!(cond)) { \
- for (;;) { \
- if (SCHEDULER_STOPPED()) \
- break; \
- sleepq_lock(c); \
- if (cond) { \
- sleepq_release(c); \
- break; \
- } \
- sleepq_add(c, NULL, "completion", SLEEPQ_SLEEP, 0); \
- sleepq_wait(c, 0); \
- } \
- } \
+struct wait_queue_head {
+ spinlock_t lock;
+ struct list_head task_list;
+};
+
+/*
+ * This function is referenced by at least one DRM driver, so it may not be
+ * renamed and furthermore must be the default wait queue callback.
+ */
+extern wait_queue_func_t autoremove_wake_function;
+
+#define DEFINE_WAIT(name) \
+ wait_queue_t name = { \
+ .private = current, \
+ .func = autoremove_wake_function, \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
+ }
+
+#define DECLARE_WAITQUEUE(name, task) \
+ wait_queue_t name = { \
+ .private = task, \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
+ }
+
+#define DECLARE_WAIT_QUEUE_HEAD(name) \
+ wait_queue_head_t name = { \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list), \
+ }; \
+ MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)
+
+#define init_waitqueue_head(wqh) do { \
+ mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"), \
+ NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS); \
+ INIT_LIST_HEAD(&(wqh)->task_list); \
} while (0)
-#define wait_event_interruptible(q, cond) \
-({ \
- void *c = &(q).wchan; \
- int _error; \
- \
- _error = 0; \
- if (!(cond)) { \
- for (; _error == 0;) { \
- if (SCHEDULER_STOPPED()) \
- break; \
- sleepq_lock(c); \
- if (cond) { \
- sleepq_release(c); \
- break; \
- } \
- sleepq_add(c, NULL, "completion", \
- SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0); \
- if (sleepq_wait_sig(c, 0)) \
- _error = -ERESTARTSYS; \
- } \
- } \
- -_error; \
+void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);
+
+#define wake_up(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 1, false)
+#define wake_up_all(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 0, false)
+#define wake_up_locked(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 1, true)
+#define wake_up_all_locked(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 0, true)
+#define wake_up_interruptible(wqh) \
+ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
+#define wake_up_interruptible_all(wqh) \
+ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)
+
+int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
+ unsigned int, spinlock_t *);
+
+/*
+ * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
+ * cond is true after timeout, remaining jiffies (> 0) if cond is true before
+ * timeout.
+ */
+#define __wait_event_common(wqh, cond, timeout, state, lock) ({ \
+ DEFINE_WAIT(__wq); \
+ const int __timeout = (timeout) < 1 ? 1 : (timeout); \
+ int __start = ticks; \
+ int __ret = 0; \
+ \
+ for (;;) { \
+ linux_prepare_to_wait(&(wqh), &__wq, state); \
+ if (cond) { \
+ __ret = 1; \
+ break; \
+ } \
+ __ret = linux_wait_event_common(&(wqh), &__wq, \
+ __timeout, state, lock); \
+ if (__ret != 0) \
+ break; \
+ } \
+ linux_finish_wait(&(wqh), &__wq); \
+ if (__timeout != MAX_SCHEDULE_TIMEOUT) { \
+ if (__ret == -EWOULDBLOCK) \
+ __ret = !!(cond); \
+ else if (__ret != -ERESTARTSYS) { \
+ __ret = __timeout + __start - ticks; \
+ /* range check return value */ \
+ if (__ret < 1) \
+ __ret = 1; \
+ else if (__ret > __timeout) \
+ __ret = __timeout; \
+ } \
+ } \
+ __ret; \
})
-#define wait_event_interruptible_timeout(q, cond, timeout) \
-({ \
- void *c = &(q).wchan; \
- long end = jiffies + timeout; \
- int __ret = 0; \
- int __rc = 0; \
+#define wait_event(wqh, cond) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_UNINTERRUPTIBLE, NULL); \
+})
+
+#define wait_event_timeout(wqh, cond, timeout) ({ \
+ __wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE, \
+ NULL); \
+})
+
+#define wait_event_interruptible(wqh, cond) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, NULL); \
+})
+
+#define wait_event_interruptible_timeout(wqh, cond, timeout) ({ \
+ __wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE, \
+ NULL); \
+})
+
+/*
+ * Wait queue is already locked.
+ */
+#define wait_event_interruptible_locked(wqh, cond) ({ \
+ int __ret; \
\
- if (!(cond)) { \
- for (; __rc == 0;) { \
- if (SCHEDULER_STOPPED()) \
- break; \
- sleepq_lock(c); \
- if (cond) { \
- sleepq_release(c); \
- __ret = 1; \
- break; \
- } \
- sleepq_add(c, NULL, "completion", \
- SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0); \
- sleepq_set_timeout(c, linux_timer_jiffies_until(end));\
- __rc = sleepq_timedwait_sig (c, 0); \
- if (__rc != 0) { \
- /* check for timeout or signal. \
- * 0 if the condition evaluated to false\
- * after the timeout elapsed, 1 if the \
- * condition evaluated to true after the\
- * timeout elapsed. \
- */ \
- if (__rc == EWOULDBLOCK) \
- __ret = (cond); \
- else \
- __ret = -ERESTARTSYS; \
- } \
- \
- } \
- } else { \
- /* return remaining jiffies (at least 1) if the \
- * condition evaluated to true before the timeout \
- * elapsed. \
- */ \
- __ret = (end - jiffies); \
- if( __ret < 1 ) \
- __ret = 1; \
- } \
+ spin_unlock(&(wqh).lock); \
+ __ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, NULL); \
+ spin_lock(&(wqh).lock); \
__ret; \
})
+/*
+ * Hold the (locked) spinlock when testing the cond.
+ */
+#define wait_event_interruptible_lock_irq(wqh, cond, lock) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, &(lock)); \
+})
-static inline int
-waitqueue_active(wait_queue_head_t *q)
+static inline void
+__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
- return 0; /* XXX: not really implemented */
+ list_add(&wq->task_list, &wqh->task_list);
}
-#define DEFINE_WAIT(name) \
- wait_queue_t name = {}
+static inline void
+add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+ spin_lock(&wqh->lock);
+ __add_wait_queue(wqh, wq);
+ spin_unlock(&wqh->lock);
+}
+
static inline void
-prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
+ list_add_tail(&wq->task_list, &wqh->task_list);
}
static inline void
-finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
+ list_del(&wq->task_list);
}
-#endif /* _LINUX_WAIT_H_ */
+static inline void
+remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+
+ spin_lock(&wqh->lock);
+ __remove_wait_queue(wqh, wq);
+ spin_unlock(&wqh->lock);
+}
+
+bool linux_waitqueue_active(wait_queue_head_t *);
+
+#define waitqueue_active(wqh) linux_waitqueue_active(wqh)
+
+void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
+void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);
+
+#define prepare_to_wait(wqh, wq, state) linux_prepare_to_wait(wqh, wq, state)
+#define finish_wait(wqh, wq) linux_finish_wait(wqh, wq)
+
+void linux_wake_up_bit(void *, int);
+int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
+void linux_wake_up_atomic_t(atomic_t *);
+int linux_wait_on_atomic_t(atomic_t *, unsigned int);
+
+#define wake_up_bit(word, bit) linux_wake_up_bit(word, bit)
+#define wait_on_bit_timeout(word, bit, state, timeout) \
+ linux_wait_on_bit_timeout(word, bit, state, timeout)
+#define wake_up_atomic_t(a) linux_wake_up_atomic_t(a)
+/*
+ * All existing callers have a cb that just schedule()s. To avoid adding
+ * complexity, just emulate that internally. The prototype is different so that
+ * callers must be manually modified; a cb that does something other than call
+ * schedule() will require special treatment.
+ */
+#define wait_on_atomic_t(a, state) linux_wait_on_atomic_t(a, state)
+
+struct task_struct;
+bool linux_wake_up_state(struct task_struct *, unsigned int);
+
+#define wake_up_process(task) linux_wake_up_state(task, TASK_NORMAL)
+#define wake_up_state(task, state) linux_wake_up_state(task, state)
+
+#endif /* _LINUX_WAIT_H_ */
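
Illustrative usage, not part of the diff: a sketch of the new wait-queue API, decoding the return-value convention documented above __wait_event_common(). The names example_wq, example_done, example_sleep, and example_complete are invented.

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_done;

/* Sleeper: wait up to 2*hz ticks for example_done to become nonzero. */
static int
example_sleep(void)
{
        int ret;

        ret = wait_event_interruptible_timeout(example_wq,
            example_done != 0, 2 * hz);
        if (ret == -ERESTARTSYS)
                return (ret);           /* interrupted by a signal */
        if (ret == 0)
                return (-ETIMEDOUT);    /* timed out, condition still false */
        return (0);                     /* ret >= 1: condition became true */
}

/* Waker: make the condition true, then wake all interruptible sleepers. */
static void
example_complete(void)
{
        example_done = 1;
        wake_up_interruptible_all(&example_wq);
}

Note that the wait_event_*() macros take the wait queue head by name (the expansion applies the address-of operator), while the wake_up_*() macros take a pointer.
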
Index: head/sys/compat/linuxkpi/common/src/linux_kthread.c
===================================================================
--- head/sys/compat/linuxkpi/common/src/linux_kthread.c
+++ head/sys/compat/linuxkpi/common/src/linux_kthread.c
@@ -27,9 +27,10 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/sched.h>
-#include <linux/compat.h>
+#include <linux/wait.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
Index: head/sys/compat/linuxkpi/common/src/linux_schedule.c
===================================================================
--- head/sys/compat/linuxkpi/common/src/linux_schedule.c
+++ head/sys/compat/linuxkpi/common/src/linux_schedule.c
@@ -0,0 +1,391 @@
+/*-
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/sleepqueue.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+static int
+linux_add_to_sleepqueue(void *wchan, const char *wmesg, int timeout, int state)
+{
+ int flags, ret;
+
+ MPASS((state & ~TASK_NORMAL) == 0);
+
+ flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
+ SLEEPQ_INTERRUPTIBLE : 0);
+
+ sleepq_add(wchan, NULL, wmesg, flags, 0);
+ if (timeout != 0)
+ sleepq_set_timeout(wchan, timeout);
+ if ((state & TASK_INTERRUPTIBLE) != 0) {
+ if (timeout == 0)
+ ret = -sleepq_wait_sig(wchan, 0);
+ else
+ ret = -sleepq_timedwait_sig(wchan, 0);
+ } else {
+ if (timeout == 0) {
+ sleepq_wait(wchan, 0);
+ ret = 0;
+ } else
+ ret = -sleepq_timedwait(wchan, 0);
+ }
+ /* filter return value */
+ if (ret != 0 && ret != -EWOULDBLOCK)
+ ret = -ERESTARTSYS;
+ return (ret);
+}
+
+static int
+wake_up_task(struct task_struct *task, unsigned int state)
+{
+ int ret, wakeup_swapper;
+
+ ret = wakeup_swapper = 0;
+ sleepq_lock(task);
+ if ((atomic_load_acq_int(&task->state) & state) != 0) {
+ set_task_state(task, TASK_WAKING);
+ wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
+ ret = 1;
+ }
+ sleepq_release(task);
+ if (wakeup_swapper)
+ kick_proc0();
+ return (ret);
+}
+
+bool
+linux_signal_pending(struct task_struct *task)
+{
+ struct thread *td;
+ sigset_t pending;
+
+ td = task->task_thread;
+ PROC_LOCK(td->td_proc);
+ pending = td->td_siglist;
+ SIGSETOR(pending, td->td_proc->p_siglist);
+ SIGSETNAND(pending, td->td_sigmask);
+ PROC_UNLOCK(td->td_proc);
+ return (!SIGISEMPTY(pending));
+}
+
+bool
+linux_fatal_signal_pending(struct task_struct *task)
+{
+ struct thread *td;
+ bool ret;
+
+ td = task->task_thread;
+ PROC_LOCK(td->td_proc);
+ ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
+ SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
+ PROC_UNLOCK(td->td_proc);
+ return (ret);
+}
+
+bool
+linux_signal_pending_state(long state, struct task_struct *task)
+{
+
+ MPASS((state & ~TASK_NORMAL) == 0);
+
+ if ((state & TASK_INTERRUPTIBLE) == 0)
+ return (false);
+ return (linux_signal_pending(task));
+}
+
+void
+linux_send_sig(int signo, struct task_struct *task)
+{
+ struct thread *td;
+
+ td = task->task_thread;
+ PROC_LOCK(td->td_proc);
+ tdsignal(td, signo);
+ PROC_UNLOCK(td->td_proc);
+}
+
+int
+autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
+ void *key __unused)
+{
+ struct task_struct *task;
+ int ret;
+
+ task = wq->private;
+ if ((ret = wake_up_task(task, state)) != 0)
+ list_del_init(&wq->task_list);
+ return (ret);
+}
+
+void
+linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
+{
+ wait_queue_t *pos, *next;
+
+ if (!locked)
+ spin_lock(&wqh->lock);
+ list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
+ if (pos->func == NULL) {
+ if (wake_up_task(pos->private, state) != 0 && --nr == 0)
+ break;
+ } else {
+ if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
+ break;
+ }
+ }
+ if (!locked)
+ spin_unlock(&wqh->lock);
+}
+
+void
+linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
+{
+
+ spin_lock(&wqh->lock);
+ if (list_empty(&wq->task_list))
+ __add_wait_queue(wqh, wq);
+ set_task_state(current, state);
+ spin_unlock(&wqh->lock);
+}
+
+void
+linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+
+ spin_lock(&wqh->lock);
+ set_task_state(current, TASK_RUNNING);
+ if (!list_empty(&wq->task_list)) {
+ __remove_wait_queue(wqh, wq);
+ INIT_LIST_HEAD(&wq->task_list);
+ }
+ spin_unlock(&wqh->lock);
+}
+
+bool
+linux_waitqueue_active(wait_queue_head_t *wqh)
+{
+ bool ret;
+
+ spin_lock(&wqh->lock);
+ ret = !list_empty(&wqh->task_list);
+ spin_unlock(&wqh->lock);
+ return (ret);
+}
+
+int
+linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
+ unsigned int state, spinlock_t *lock)
+{
+ struct task_struct *task;
+ long ret;
+
+ if (lock != NULL)
+ spin_unlock_irq(lock);
+
+ DROP_GIANT();
+
+ task = current;
+
+ /*
+ * Our wait queue entry is on the stack - make sure it doesn't
+ * get swapped out while we sleep.
+ */
+#ifndef NO_SWAPPING
+ PHOLD(task->task_thread->td_proc);
+#endif
+ sleepq_lock(task);
+ if (atomic_load_acq_int(&task->state) != TASK_WAKING) {
+ ret = linux_add_to_sleepqueue(task, "wevent", timeout, state);
+ } else {
+ sleepq_release(task);
+ ret = linux_signal_pending_state(state, task) ? -ERESTARTSYS : 0;
+ }
+#ifndef NO_SWAPPING
+ PRELE(task->task_thread->td_proc);
+#endif
+
+ PICKUP_GIANT();
+
+ if (lock != NULL)
+ spin_lock_irq(lock);
+ return (ret);
+}
+
+int
+linux_schedule_timeout(int timeout)
+{
+ struct task_struct *task;
+ int state;
+ int remainder;
+
+ task = current;
+
+ /* range check timeout */
+ if (timeout < 1)
+ timeout = 1;
+ else if (timeout == MAX_SCHEDULE_TIMEOUT)
+ timeout = 0;
+
+ remainder = ticks + timeout;
+
+ DROP_GIANT();
+
+ sleepq_lock(task);
+ state = atomic_load_acq_int(&task->state);
+ if (state != TASK_WAKING)
+ (void)linux_add_to_sleepqueue(task, "sched", timeout, state);
+ else
+ sleepq_release(task);
+ set_task_state(task, TASK_RUNNING);
+
+ PICKUP_GIANT();
+
+ if (timeout == 0)
+ return (MAX_SCHEDULE_TIMEOUT);
+
+ /* range check return value */
+ remainder -= ticks;
+ if (remainder < 0)
+ remainder = 0;
+ else if (remainder > timeout)
+ remainder = timeout;
+ return (remainder);
+}
+
+static void
+wake_up_sleepers(void *wchan)
+{
+ int wakeup_swapper;
+
+ sleepq_lock(wchan);
+ wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
+ sleepq_release(wchan);
+ if (wakeup_swapper)
+ kick_proc0();
+}
+
+#define bit_to_wchan(word, bit) ((void *)(((uintptr_t)(word) << 6) | (bit)))
+
+void
+linux_wake_up_bit(void *word, int bit)
+{
+
+ wake_up_sleepers(bit_to_wchan(word, bit));
+}
+
+int
+linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
+ int timeout)
+{
+ struct task_struct *task;
+ void *wchan;
+ int ret;
+
+ DROP_GIANT();
+
+ /* range check timeout */
+ if (timeout < 1)
+ timeout = 1;
+ else if (timeout == MAX_SCHEDULE_TIMEOUT)
+ timeout = 0;
+
+ task = current;
+ wchan = bit_to_wchan(word, bit);
+ for (;;) {
+ sleepq_lock(wchan);
+ if ((*word & (1 << bit)) == 0) {
+ sleepq_release(wchan);
+ ret = 0;
+ break;
+ }
+ set_task_state(task, state);
+ ret = linux_add_to_sleepqueue(wchan, "wbit", timeout, state);
+ if (ret != 0)
+ break;
+ }
+ set_task_state(task, TASK_RUNNING);
+
+ PICKUP_GIANT();
+
+ return (ret);
+}
+
+void
+linux_wake_up_atomic_t(atomic_t *a)
+{
+
+ wake_up_sleepers(a);
+}
+
+int
+linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
+{
+ struct task_struct *task;
+ void *wchan;
+ int ret;
+
+ DROP_GIANT();
+
+ task = current;
+ wchan = a;
+ for (;;) {
+ sleepq_lock(wchan);
+ if (atomic_read(a) == 0) {
+ sleepq_release(wchan);
+ ret = 0;
+ break;
+ }
+ set_task_state(task, state);
+ ret = linux_add_to_sleepqueue(wchan, "watomic", 0, state);
+ if (ret != 0)
+ break;
+ }
+ set_task_state(task, TASK_RUNNING);
+
+ PICKUP_GIANT();
+
+ return (ret);
+}
+
+bool
+linux_wake_up_state(struct task_struct *task, unsigned int state)
+{
+
+ return (wake_up_task(task, state) != 0);
+}
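
Illustrative usage, not part of the diff: the bit-wait helpers used as a busy/idle handshake. EXAMPLE_BUSY, example_flags, and both functions are invented; clear_bit() is the LinuxKPI bitops helper. Given the implementation above, the waiter returns 0 once the bit is observed clear, or -EWOULDBLOCK if the timeout expires first (interruptible states could additionally see -ERESTARTSYS).

#define EXAMPLE_BUSY    0               /* invented bit index */
static unsigned long example_flags;

/* Waiter: sleep until EXAMPLE_BUSY is clear, for at most hz ticks. */
static int
example_wait_idle(void)
{
        return (wait_on_bit_timeout(&example_flags, EXAMPLE_BUSY,
            TASK_UNINTERRUPTIBLE, hz));
}

/* Completer: clear the bit, then wake any sleepers hashed to that bit. */
static void
example_mark_idle(void)
{
        clear_bit(EXAMPLE_BUSY, &example_flags);
        wake_up_bit(&example_flags, EXAMPLE_BUSY);
}
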
Index: head/sys/conf/files
===================================================================
--- head/sys/conf/files
+++ head/sys/conf/files
@@ -4281,6 +4281,8 @@
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_rcu.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C} -I$S/contrib/ck/include"
+compat/linuxkpi/common/src/linux_schedule.c optional compat_linuxkpi \
+ compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_slab.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_usb.c optional compat_linuxkpi usb \
Index: head/sys/modules/linuxkpi/Makefile
===================================================================
--- head/sys/modules/linuxkpi/Makefile
+++ head/sys/modules/linuxkpi/Makefile
@@ -11,6 +11,7 @@
linux_pci.c \
linux_radix.c \
linux_rcu.c \
+ linux_schedule.c \
linux_slab.c \
linux_tasklet.c \
linux_idr.c \