Page MenuHomeFreeBSD

D27171.id79624.diff
No OneTemporary

D27171.id79624.diff

Index: sys/compat/linuxkpi/common/include/linux/irq_work.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/irq_work.h
+++ sys/compat/linuxkpi/common/include/linux/irq_work.h
@@ -31,22 +31,37 @@
#ifndef __LINUX_IRQ_WORK_H__
#define __LINUX_IRQ_WORK_H__
-#include <linux/workqueue.h>
+#include <sys/param.h>
+#include <sys/taskqueue.h>
+
+/* Linux irq_work shim: defers a callback to a dedicated taskqueue thread. */
+struct irq_work;
+typedef void (*irq_work_func_t)(struct irq_work *);
struct irq_work {
- struct work_struct work;
+ struct task irq_task; /* backing taskqueue(9) task */
+ irq_work_func_t func; /* consumer callback, run from task context */
};
+/* Single taskqueue servicing all irq_work items (created in linux_work.c). */
+extern struct taskqueue *linux_irq_work_tq;
+
+/* Static-storage initializer: equivalent of init_irq_work() at definition. */
+#define DEFINE_IRQ_WORK(name, _func) struct irq_work name = { \
+ .irq_task = TASK_INITIALIZER(0, linux_irq_work_fn, &(name)), \
+ .func = (_func), \
+}
+
+/* Taskqueue trampoline that invokes irq_work::func; not for direct use. */
+void linux_irq_work_fn(void *, int);
+
+/*
+ * Run-time counterpart of DEFINE_IRQ_WORK(): bind the trampoline and the
+ * consumer callback to an irq_work instance before first irq_work_queue().
+ */
static inline void
-init_irq_work(struct irq_work *irqw, void (*func)(struct irq_work *))
+init_irq_work(struct irq_work *irqw, irq_work_func_t func)
{
- INIT_WORK(&irqw->work, (work_func_t)func);
+ TASK_INIT(&irqw->irq_task, 0, linux_irq_work_fn, irqw);
+ irqw->func = func;
}
+/*
+ * Defer irqw->func to the irq_work taskqueue thread.
+ * NOTE(review): assumes linux_irq_work_tq is already created (SYSINIT at
+ * SI_SUB_TASKQ) — confirm no caller can enqueue earlier in boot.
+ */
static inline void
irq_work_queue(struct irq_work *irqw)
{
- schedule_work(&irqw->work);
+ taskqueue_enqueue(linux_irq_work_tq, &irqw->irq_task);
}
#endif /* __LINUX_IRQ_WORK_H__ */
Index: sys/compat/linuxkpi/common/include/linux/llist.h
===================================================================
--- /dev/null
+++ sys/compat/linuxkpi/common/include/linux/llist.h
@@ -0,0 +1,101 @@
+/* Public domain. */
+
+#ifndef _LINUX_LLIST_H
+#define _LINUX_LLIST_H
+
+#include <sys/types.h>
+#include <machine/atomic.h>
+
+/* Lock-less singly-linked LIFO list, modeled on Linux's <linux/llist.h>. */
+struct llist_node {
+ struct llist_node *next;
+};
+
+struct llist_head {
+ struct llist_node *first;
+};
+
+#define LLIST_HEAD_INIT(name) { NULL }
+#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
+
+/* Map a node pointer back to its containing structure; NULL-safe. */
+#define llist_entry(ptr, type, member) \
+ ((ptr) ? container_of(ptr, type, member) : NULL)
+
+/*
+ * Atomically detach the entire list in one swap; returns the head of the
+ * detached chain (most recently added first), or NULL if it was empty.
+ */
+static inline struct llist_node *
+llist_del_all(struct llist_head *head)
+{
+ return ((void *)atomic_readandclear_ptr((uintptr_t *)&head->first));
+}
+
+/*
+ * Pop the first node via CAS; returns NULL if the list is empty.
+ * NOTE(review): as with Linux's llist, concurrent llist_del_first()
+ * callers must be serialized externally — the first->next read can race
+ * with a competing removal.  Single-consumer usage (one taskqueue
+ * thread) is safe against concurrent llist_add() producers.
+ */
+static inline struct llist_node *
+llist_del_first(struct llist_head *head)
+{
+ struct llist_node *first, *next;
+
+ do {
+ first = head->first;
+ if (first == NULL)
+ return NULL;
+ next = first->next;
+ } while (atomic_cmpset_ptr((uintptr_t *)&head->first,
+ (uintptr_t)first, (uintptr_t)next) == 0);
+
+ return (first);
+}
+
+/*
+ * Push a node onto the front of the list via CAS; safe against other
+ * concurrent adders.  Returns true if the list was previously empty.
+ */
+static inline bool
+llist_add(struct llist_node *new, struct llist_head *head)
+{
+ struct llist_node *first;
+
+ do {
+ new->next = first = head->first;
+ } while (atomic_cmpset_ptr((uintptr_t *)&head->first,
+ (uintptr_t)first, (uintptr_t)new) == 0);
+
+ return (first == NULL);
+}
+
+/*
+ * Push a pre-linked chain (new_first .. new_last) onto the front of the
+ * list in a single CAS.  The caller must have linked the chain's interior
+ * next pointers already.  Returns true if the list was previously empty.
+ */
+static inline bool
+llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *first;
+
+ do {
+ new_last->next = first = head->first;
+ } while (atomic_cmpset_ptr((uintptr_t *)&head->first,
+ (uintptr_t)first, (uintptr_t)new_first) == 0);
+
+ return (first == NULL);
+}
+
+/* Run-time counterpart of LLIST_HEAD_INIT(). */
+static inline void
+init_llist_head(struct llist_head *head)
+{
+ head->first = NULL;
+}
+
+/*
+ * Unsynchronized emptiness snapshot; may be stale under concurrent
+ * add/del, so only use where an approximate answer is acceptable.
+ */
+static inline bool
+llist_empty(struct llist_head *head)
+{
+ return (head->first == NULL);
+}
+
+/*
+ * Iterators over a detached chain (e.g. the return of llist_del_all()).
+ * The _safe variants capture the next pointer before the body runs, so
+ * the current node may be freed or re-linked inside the loop.
+ */
+#define llist_for_each_safe(pos, n, node) \
+ for ((pos) = (node); \
+ (pos) != NULL && \
+ ((n) = (pos)->next, pos); \
+ (pos) = (n))
+
+#define llist_for_each_entry_safe(pos, n, node, member) \
+ for (pos = llist_entry((node), __typeof(*pos), member); \
+ pos != NULL && \
+ (n = llist_entry(pos->member.next, __typeof(*pos), member), pos); \
+ pos = n)
+
+/* Non-safe variant: the body must not free or unlink the current entry. */
+#define llist_for_each_entry(pos, node, member) \
+ for ((pos) = llist_entry((node), __typeof(*(pos)), member); \
+ (pos) != NULL; \
+ (pos) = llist_entry((pos)->member.next, __typeof(*(pos)), member))
+
+#endif
Index: sys/compat/linuxkpi/common/include/linux/slab.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/slab.h
+++ sys/compat/linuxkpi/common/include/linux/slab.h
@@ -35,10 +35,12 @@
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/limits.h>
+#include <sys/proc.h>
#include <vm/uma.h>
#include <linux/types.h>
#include <linux/gfp.h>
+#include <linux/llist.h>
MALLOC_DECLARE(M_KMALLOC);
@@ -90,6 +92,19 @@
#define ARCH_KMALLOC_MINALIGN \
__alignof(unsigned long long)
+/*
+ * Critical section-friendly version of kfree().
+ * Requires knowledge of the allocation size at build time.
+ *
+ * While queued for deferred freeing, the first bytes of the object are
+ * reused as a struct llist_node — hence the size assertion below.  Only
+ * the critical-section path defers; otherwise this is a plain kfree().
+ */
+#define kfree_async(ptr) do { \
+ _Static_assert(sizeof(*(ptr)) >= sizeof(struct llist_node), \
+ "Size of object to free is unknown or too small"); \
+ if (curthread->td_critnest != 0) \
+ linux_kfree_async(ptr); \
+ else \
+ kfree(ptr); \
+} while (0)
+
static inline gfp_t
linux_check_m_flags(gfp_t flags)
{
@@ -189,5 +204,6 @@
}
extern void linux_kmem_cache_destroy(struct linux_kmem_cache *);
+void linux_kfree_async(void *);
#endif /* _LINUX_SLAB_H_ */
Index: sys/compat/linuxkpi/common/src/linux_slab.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_slab.c
+++ sys/compat/linuxkpi/common/src/linux_slab.c
@@ -30,6 +30,11 @@
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
+#include <linux/irq_work.h>
+#include <linux/llist.h>
+
+#include <sys/param.h>
+#include <sys/taskqueue.h>
struct linux_kmem_rcu {
struct rcu_head rcu_head;
@@ -44,6 +49,8 @@
((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
(r)->cache->cache_size))
+static LLIST_HEAD(linux_kfree_async_list);
+
static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
@@ -126,3 +133,23 @@
uma_zdestroy(c->cache_zone);
free(c, M_KMALLOC);
}
+
+/*
+ * Taskqueue callback: drain the deferred-free list.  Each llist_node
+ * lives in the first bytes of the chunk being freed (see kfree_async()),
+ * so freeing the node frees the chunk itself.
+ */
+static void
+linux_kfree_async_fn(void *context, int pending)
+{
+ struct llist_node *freed;
+
+ while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
+ kfree(freed);
+}
+
+static struct task linux_kfree_async_task =
+ TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);
+
+/*
+ * Queue a chunk for freeing from taskqueue context.  Overlays a
+ * llist_node on the chunk's first bytes (size is guaranteed by the
+ * static assert in kfree_async()).  Re-enqueueing an already-pending
+ * task is cheap, so no pending check is needed here.
+ */
+void
+linux_kfree_async(void *addr)
+{
+ if (addr == NULL)
+ return;
+ llist_add(addr, &linux_kfree_async_list);
+ taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
+}
Index: sys/compat/linuxkpi/common/src/linux_work.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_work.c
+++ sys/compat/linuxkpi/common/src/linux_work.c
@@ -32,6 +32,7 @@
#include <linux/compat.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/irq_work.h>
#include <sys/kernel.h>
@@ -59,6 +60,8 @@
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_power_efficient_wq;
+struct taskqueue *linux_irq_work_tq;
+
static int linux_default_wq_cpus = 4;
static void linux_delayed_work_timer_fn(void *);
@@ -683,3 +686,48 @@
system_highpri_wq = NULL;
}
SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL);
+
+/* Taskqueue trampoline: forward the task invocation to irq_work::func. */
+void
+linux_irq_work_fn(void *context, int pending)
+{
+ struct irq_work *irqw = context;
+
+ irqw->func(irqw);
+}
+
+/* One-shot task run at SYSINIT time to warm up `current` (see below). */
+static void
+linux_irq_work_init_fn(void *context, int pending)
+{
+ /*
+ * LinuxKPI performs lazy allocation of memory structures required by
+ * current on the first access to it. As some irq_work clients read
+ * it with spinlock taken, we have to preallocate td_lkpi_task before
+ * first call to irq_work_queue(). As irq_work uses a single thread,
+ * it is enough to read current once at SYSINIT stage.
+ */
+ if (current == NULL)
+ panic("irq_work taskqueue is not initialized");
+}
+static struct task linux_irq_work_init_task =
+ TASK_INITIALIZER(0, linux_irq_work_init_fn, &linux_irq_work_init_task);
+
+/*
+ * Create the single-threaded irq_work taskqueue and prime `current` on
+ * its worker thread via the init task above.
+ * NOTE(review): the taskqueue_start_threads() return value is unchecked —
+ * presumably acceptable at boot with M_WAITOK, but confirm.
+ */
+static void
+linux_irq_work_init(void *arg)
+{
+ linux_irq_work_tq = taskqueue_create_fast("linuxkpi_irq_wq",
+ M_WAITOK, taskqueue_thread_enqueue, &linux_irq_work_tq);
+ taskqueue_start_threads(&linux_irq_work_tq, 1, PWAIT,
+ "linuxkpi_irq_wq");
+ taskqueue_enqueue(linux_irq_work_tq, &linux_irq_work_init_task);
+}
+SYSINIT(linux_irq_work_init, SI_SUB_TASKQ, SI_ORDER_SECOND,
+ linux_irq_work_init, NULL);
+
+/*
+ * Tear down the irq_work taskqueue: flush outstanding tasks, then free.
+ * NOTE(review): taskqueue_free() also drains; the explicit drain_all is
+ * presumably for clarity/ordering — confirm it is intentional.
+ */
+static void
+linux_irq_work_uninit(void *arg)
+{
+ taskqueue_drain_all(linux_irq_work_tq);
+ taskqueue_free(linux_irq_work_tq);
+}
+SYSUNINIT(linux_irq_work_uninit, SI_SUB_TASKQ, SI_ORDER_SECOND,
+ linux_irq_work_uninit, NULL);

File Metadata

Mime Type
text/plain
Expires
Sat, Feb 22, 4:06 PM (1 h, 52 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
16774541
Default Alt Text
D27171.id79624.diff (8 KB)

Event Timeline