Index: head/sys/compat/linuxkpi/common/include/linux/interrupt.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/linux/interrupt.h	(revision 347851)
+++ head/sys/compat/linuxkpi/common/include/linux/interrupt.h	(revision 347852)
@@ -1,207 +1,213 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 #ifndef	_LINUX_INTERRUPT_H_
 #define	_LINUX_INTERRUPT_H_
 
 #include
 #include
 #include
 #include
 #include
 
 typedef	irqreturn_t	(*irq_handler_t)(int, void *);
 
 #define	IRQF_SHARED	RF_SHAREABLE
 
 struct irq_ent {
 	struct list_head	links;
 	struct device	*dev;
 	struct resource	*res;
 	void		*arg;
 	irqreturn_t	(*handler)(int, void *);
 	void		*tag;
 	unsigned int	irq;
 };
 
 static inline int
 linux_irq_rid(struct device *dev, unsigned int irq)
 {
 	if (irq == dev->irq)
 		return (0);
 	return irq - dev->msix + 1;
 }
 
 extern void linux_irq_handler(void *);
 
 static inline struct irq_ent *
 linux_irq_ent(struct device *dev, unsigned int irq)
 {
 	struct irq_ent *irqe;
 
 	list_for_each_entry(irqe, &dev->irqents, links)
 		if (irqe->irq == irq)
 			return (irqe);
 
 	return (NULL);
 }
 
 static inline int
 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
     const char *name, void *arg)
 {
 	struct resource *res;
 	struct irq_ent *irqe;
 	struct device *dev;
 	int error;
 	int rid;
 
 	dev = linux_pci_find_irq_dev(irq);
 	if (dev == NULL)
 		return -ENXIO;
 	rid = linux_irq_rid(dev, irq);
 	res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid,
 	    flags | RF_ACTIVE);
 	if (res == NULL)
 		return (-ENXIO);
 	irqe = kmalloc(sizeof(*irqe), GFP_KERNEL);
 	irqe->dev = dev;
 	irqe->res = res;
 	irqe->arg = arg;
 	irqe->handler = handler;
 	irqe->irq = irq;
 	error = bus_setup_intr(dev->bsddev, res, INTR_TYPE_NET | INTR_MPSAFE,
 	    NULL, linux_irq_handler, irqe, &irqe->tag);
 	if (error) {
 		bus_release_resource(dev->bsddev, SYS_RES_IRQ, rid, irqe->res);
 		kfree(irqe);
 		return (-error);
 	}
 	list_add(&irqe->links, &dev->irqents);
 	return 0;
 }
 
 static inline int
 enable_irq(unsigned int irq)
 {
 	struct irq_ent *irqe;
 	struct device *dev;
 
 	dev = linux_pci_find_irq_dev(irq);
 	if (dev == NULL)
 		return -EINVAL;
 	irqe = linux_irq_ent(dev, irq);
 	if (irqe == NULL || irqe->tag != NULL)
 		return -EINVAL;
 	return -bus_setup_intr(dev->bsddev, irqe->res, INTR_TYPE_NET | INTR_MPSAFE,
 	    NULL, linux_irq_handler, irqe, &irqe->tag);
 }
 
 static inline void
 disable_irq(unsigned int irq)
 {
 	struct irq_ent *irqe;
 	struct device *dev;
 
 	dev = linux_pci_find_irq_dev(irq);
 	if (dev == NULL)
 		return;
 	irqe = linux_irq_ent(dev, irq);
 	if (irqe == NULL)
 		return;
 	if (irqe->tag != NULL)
 		bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag);
 	irqe->tag = NULL;
 }
 
 static inline int
 bind_irq_to_cpu(unsigned int irq, int cpu_id)
 {
 	struct irq_ent *irqe;
 	struct device *dev;
 
 	dev = linux_pci_find_irq_dev(irq);
 	if (dev == NULL)
 		return (-ENOENT);
 
 	irqe = linux_irq_ent(dev, irq);
 	if (irqe == NULL)
 		return (-ENOENT);
 
 	return (-bus_bind_intr(dev->bsddev, irqe->res, cpu_id));
 }
 
 static inline void
 free_irq(unsigned int irq, void *device)
 {
 	struct irq_ent *irqe;
 	struct device *dev;
 	int rid;
 
 	dev = linux_pci_find_irq_dev(irq);
 	if (dev == NULL)
 		return;
 	rid = linux_irq_rid(dev, irq);
 	irqe = linux_irq_ent(dev, irq);
 	if (irqe == NULL)
 		return;
 	if (irqe->tag != NULL)
 		bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag);
 	bus_release_resource(dev->bsddev, SYS_RES_IRQ, rid, irqe->res);
 	list_del(&irqe->links);
 	kfree(irqe);
 }
 
 /*
  * LinuxKPI tasklet support
  */
 typedef void tasklet_func_t(unsigned long);
 
 struct tasklet_struct {
 	TAILQ_ENTRY(tasklet_struct) entry;
 	tasklet_func_t *func;
+	/* Our "state" implementation is different. Avoid same name as Linux. */
+	volatile u_int tasklet_state;
+	atomic_t count;
 	unsigned long data;
 };
 
 #define	DECLARE_TASKLET(name, func, data)	\
-struct tasklet_struct name = { { NULL, NULL }, func, data }
+struct tasklet_struct name = { { NULL, NULL }, func, ATOMIC_INIT(0), data }
 
 #define	tasklet_hi_schedule(t)	tasklet_schedule(t)
 
 extern void tasklet_schedule(struct tasklet_struct *);
 extern void tasklet_kill(struct tasklet_struct *);
 extern void tasklet_init(struct tasklet_struct *, tasklet_func_t *,
     unsigned long data);
 extern void tasklet_enable(struct tasklet_struct *);
 extern void tasklet_disable(struct tasklet_struct *);
+extern int tasklet_trylock(struct tasklet_struct *);
+extern void tasklet_unlock(struct tasklet_struct *);
+extern void tasklet_unlock_wait(struct tasklet_struct *ts);
 
 #endif	/* _LINUX_INTERRUPT_H_ */
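
The header changes above give the tasklet a dedicated tasklet_state word plus an atomic pause counter (count), and newly export tasklet_trylock(), tasklet_unlock() and tasklet_unlock_wait(). As a rough illustration of how a LinuxKPI consumer drives this API, here is a minimal sketch; all my_* identifiers are invented for this example and are not part of the change:

/*
 * Hypothetical consumer of the tasklet API declared above;
 * illustrative only, the my_* names are made up.
 */
#include <linux/interrupt.h>

static unsigned long my_events;

static void
my_tasklet_fn(unsigned long data __unused)
{
	/* Deferred work: runs later in a per-CPU "softirqN" worker. */
	my_events++;
}

/* Static initialization; ATOMIC_INIT(0) starts with the tasklet enabled. */
DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t
my_intr(int irq, void *arg)
{
	/* Keep the interrupt handler short; defer the real work. */
	tasklet_schedule(&my_tasklet);
	return (IRQ_HANDLED);
}

Such a handler would be registered through the request_irq() wrapper defined in the same header; IRQ_HANDLED comes from the LinuxKPI irqreturn definitions.
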
Index: head/sys/compat/linuxkpi/common/src/linux_tasklet.c
===================================================================
--- head/sys/compat/linuxkpi/common/src/linux_tasklet.c	(revision 347851)
+++ head/sys/compat/linuxkpi/common/src/linux_tasklet.c	(revision 347852)
@@ -1,217 +1,256 @@
 /*-
  * Copyright (c) 2017 Hans Petter Selasky
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include
 __FBSDID("$FreeBSD$");
 
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 
 #define	TASKLET_ST_IDLE 0
 #define	TASKLET_ST_BUSY 1
 #define	TASKLET_ST_EXEC 2
 #define	TASKLET_ST_LOOP 3
-#define	TASKLET_ST_PAUSED 4
 
 #define	TASKLET_ST_CMPSET(ts, old, new)	\
-	atomic_cmpset_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev, old, new)
+	atomic_cmpset_int((volatile u_int *)&(ts)->tasklet_state, old, new)
 
 #define	TASKLET_ST_SET(ts, new)	\
-	WRITE_ONCE(*(volatile uintptr_t *)&(ts)->entry.tqe_prev, new)
+	WRITE_ONCE(*(volatile u_int *)&(ts)->tasklet_state, new)
 
 #define	TASKLET_ST_GET(ts) \
-	READ_ONCE(*(volatile uintptr_t *)&(ts)->entry.tqe_prev)
+	READ_ONCE(*(volatile u_int *)&(ts)->tasklet_state)
 
+#define	TASKLET_ST_TESTANDSET(ts, new) \
+	atomic_testandset_int((volatile u_int *)&(ts)->tasklet_state, new)
+
 struct tasklet_worker {
 	struct mtx mtx;
-	TAILQ_HEAD(, tasklet_struct) head;
+	TAILQ_HEAD(tasklet_list, tasklet_struct) head;
 	struct grouptask gtask;
 } __aligned(CACHE_LINE_SIZE);
 
 #define	TASKLET_WORKER_LOCK(tw) mtx_lock(&(tw)->mtx)
 #define	TASKLET_WORKER_UNLOCK(tw) mtx_unlock(&(tw)->mtx)
 
 DPCPU_DEFINE_STATIC(struct tasklet_worker, tasklet_worker);
 
 static void
 tasklet_handler(void *arg)
 {
 	struct tasklet_worker *tw = (struct tasklet_worker *)arg;
 	struct tasklet_struct *ts;
+	struct tasklet_struct *last;
 
 	linux_set_current(curthread);
 
 	TASKLET_WORKER_LOCK(tw);
+	last = TAILQ_LAST(&tw->head, tasklet_list);
 	while (1) {
 		ts = TAILQ_FIRST(&tw->head);
 		if (ts == NULL)
 			break;
 		TAILQ_REMOVE(&tw->head, ts, entry);
 
-		TASKLET_WORKER_UNLOCK(tw);
-		do {
-			/* reset executing state */
-			TASKLET_ST_SET(ts, TASKLET_ST_EXEC);
+		if (!atomic_read(&ts->count)) {
+			TASKLET_WORKER_UNLOCK(tw);
+			do {
+				/* reset executing state */
+				TASKLET_ST_SET(ts, TASKLET_ST_EXEC);
 
-			ts->func(ts->data);
+				ts->func(ts->data);
 
-		} while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_IDLE) == 0);
-		TASKLET_WORKER_LOCK(tw);
+			} while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC,
+			    TASKLET_ST_IDLE) == 0);
+			TASKLET_WORKER_LOCK(tw);
+		} else {
+			TAILQ_INSERT_TAIL(&tw->head, ts, entry);
+		}
+		if (ts == last)
+			break;
 	}
 	TASKLET_WORKER_UNLOCK(tw);
 }
 
 static void
 tasklet_subsystem_init(void *arg __unused)
 {
 	struct tasklet_worker *tw;
 	char buf[32];
 	int i;
 
 	CPU_FOREACH(i) {
 		if (CPU_ABSENT(i))
 			continue;
 
 		tw = DPCPU_ID_PTR(i, tasklet_worker);
 
 		mtx_init(&tw->mtx, "linux_tasklet", NULL, MTX_DEF);
 		TAILQ_INIT(&tw->head);
 		GROUPTASK_INIT(&tw->gtask, 0, tasklet_handler, tw);
 		snprintf(buf, sizeof(buf), "softirq%d", i);
 		taskqgroup_attach_cpu(qgroup_softirq, &tw->gtask,
 		    "tasklet", i, NULL, NULL, buf);
 	}
 }
 SYSINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);
 
 static void
 tasklet_subsystem_uninit(void *arg __unused)
 {
 	struct tasklet_worker *tw;
 	int i;
 
 	CPU_FOREACH(i) {
 		if (CPU_ABSENT(i))
 			continue;
 
 		tw = DPCPU_ID_PTR(i, tasklet_worker);
 
 		taskqgroup_detach(qgroup_softirq, &tw->gtask);
 		mtx_destroy(&tw->mtx);
 	}
 }
 SYSUNINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_uninit, NULL);
 
 void
 tasklet_init(struct tasklet_struct *ts,
     tasklet_func_t *func, unsigned long data)
 {
 	ts->entry.tqe_prev = NULL;
 	ts->entry.tqe_next = NULL;
 	ts->func = func;
 	ts->data = data;
+	atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
+	atomic_set(&ts->count, 0);
 }
 
 void
 local_bh_enable(void)
 {
 	sched_unpin();
 }
 
 void
 local_bh_disable(void)
 {
 	sched_pin();
 }
 
 void
 tasklet_schedule(struct tasklet_struct *ts)
 {
 
+	/* tasklet is paused */
+	if (atomic_read(&ts->count))
+		return;
+
 	if (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_LOOP)) {
 		/* tasklet_handler() will loop */
 	} else if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY)) {
 		struct tasklet_worker *tw;
 
 		tw = &DPCPU_GET(tasklet_worker);
 
 		/* tasklet_handler() was not queued */
 		TASKLET_WORKER_LOCK(tw);
 		/* enqueue tasklet */
 		TAILQ_INSERT_TAIL(&tw->head, ts, entry);
 		/* schedule worker */
 		GROUPTASK_ENQUEUE(&tw->gtask);
 		TASKLET_WORKER_UNLOCK(tw);
 	} else {
 		/*
 		 * tasklet_handler() is already executing
 		 *
 		 * If the state is neither EXEC nor IDLE, it is either
 		 * LOOP or BUSY. If the state changed between the two
 		 * CMPSET's above the only possible transitions by
 		 * elimination are LOOP->EXEC and BUSY->EXEC. If a
 		 * EXEC->LOOP transition was missed that is not a
 		 * problem because the callback function is then
 		 * already about to be called again.
 		 */
 	}
 }
 
 void
 tasklet_kill(struct tasklet_struct *ts)
 {
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");
 
 	/* wait until tasklet is no longer busy */
 	while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
 		pause("W", 1);
 }
 
 void
 tasklet_enable(struct tasklet_struct *ts)
 {
-	(void) TASKLET_ST_CMPSET(ts, TASKLET_ST_PAUSED, TASKLET_ST_IDLE);
+
+	atomic_dec(&ts->count);
 }
 
 void
 tasklet_disable(struct tasklet_struct *ts)
 {
-	while (1) {
-		if (TASKLET_ST_GET(ts) == TASKLET_ST_PAUSED)
-			break;
-		if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_PAUSED))
-			break;
+
+	atomic_inc(&ts->count);
+	tasklet_unlock_wait(ts);
+}
+
+int
+tasklet_trylock(struct tasklet_struct *ts)
+{
+
+	return (!TASKLET_ST_TESTANDSET(ts, TASKLET_ST_BUSY));
+}
+
+void
+tasklet_unlock(struct tasklet_struct *ts)
+{
+
+	TASKLET_ST_SET(ts, TASKLET_ST_IDLE);
+}
+
+void
+tasklet_unlock_wait(struct tasklet_struct *ts)
+{
+
+	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");
+
+	/* wait until tasklet is no longer busy */
+	while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
 		pause("W", 1);
-	}
 }
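
Taken together, the rework moves the tasklet state machine out of the TAILQ linkage pointer (entry.tqe_prev) into the dedicated tasklet_state word and replaces the old TASKLET_ST_PAUSED state with a Linux-style pause counter: tasklet_disable() increments count and then waits in tasklet_unlock_wait() until a callback already in flight has returned to the idle state, while tasklet_enable() decrements it, so disable/enable pairs nest. While count is non-zero, tasklet_schedule() returns without queueing and tasklet_handler() re-queues the tasklet instead of running it. A minimal sketch of the resulting semantics; my_pause_resume is an invented name, not part of the change:

/*
 * Illustration of the new pause-counter semantics; the function
 * name is hypothetical.
 */
static void
my_pause_resume(struct tasklet_struct *ts)
{
	tasklet_disable(ts);	/* count 0 -> 1; waits for a running callback */
	tasklet_disable(ts);	/* count 1 -> 2; disable calls nest */

	tasklet_schedule(ts);	/* no-op while count != 0 */

	tasklet_enable(ts);	/* count 2 -> 1; still paused */
	tasklet_enable(ts);	/* count 1 -> 0; scheduling works again */
}

The newly exported tasklet_trylock()/tasklet_unlock()/tasklet_unlock_wait() expose the same busy/idle state word to drivers that need to serialize against the callback themselves.
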