diff --git a/sys/compat/linuxkpi/common/include/linux/spinlock.h b/sys/compat/linuxkpi/common/include/linux/spinlock.h
index 4bc1e2a2d431..3f6eb4bb70f6 100644
--- a/sys/compat/linuxkpi/common/include/linux/spinlock.h
+++ b/sys/compat/linuxkpi/common/include/linux/spinlock.h
@@ -1,187 +1,173 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
  * Copyright (c) 2010 Panasas, Inc.
  * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 #ifndef _LINUXKPI_LINUX_SPINLOCK_H_
 #define _LINUXKPI_LINUX_SPINLOCK_H_

 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include

-typedef struct {
-	struct mtx m;
-} spinlock_t;
+typedef struct mtx spinlock_t;

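[Illustrative sketch, not part of the patch: with the new typedef a LinuxKPI spinlock_t is the struct mtx itself, so the wrapper macros below hand the caller's pointer straight to the mtx(9) API instead of dereferencing an embedded member. Names such as example_lock are made up; <linux/spinlock.h> (this header) is assumed to be included.]

	static spinlock_t example_lock;
	static int example_counter;

	static void
	example_init(void)
	{
		/* Now expands directly to mtx_init(&example_lock, ..., MTX_DEF | MTX_NOWITNESS). */
		spin_lock_init(&example_lock);
	}

	static void
	example_bump(void)
	{
		spin_lock(&example_lock);	/* mtx_lock(&example_lock); local_bh_disable(); */
		example_counter++;
		spin_unlock(&example_lock);	/* local_bh_enable(); mtx_unlock(&example_lock); */
	}

Previously the same calls went through &(_l)->m; dropping the wrapper struct removes that indirection without changing the locking semantics.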
 /*
  * By defining CONFIG_SPIN_SKIP LinuxKPI spinlocks and asserts will be
  * skipped during panic(). By default it is disabled due to
  * performance reasons.
  */
 #ifdef CONFIG_SPIN_SKIP
 #define	SPIN_SKIP(void)	unlikely(SCHEDULER_STOPPED() || kdb_active)
 #else
 #define	SPIN_SKIP(void)	0
 #endif

 #define	spin_lock(_l) do {			\
 	if (SPIN_SKIP())			\
 		break;				\
-	mtx_lock(&(_l)->m);			\
+	mtx_lock(_l);				\
 	local_bh_disable();			\
 } while (0)

 #define	spin_lock_bh(_l) do {			\
 	spin_lock(_l);				\
 	local_bh_disable();			\
 } while (0)

 #define	spin_lock_irq(_l) do {			\
 	spin_lock(_l);				\
 } while (0)

 #define	spin_unlock(_l) do {			\
 	if (SPIN_SKIP())			\
 		break;				\
 	local_bh_enable();			\
-	mtx_unlock(&(_l)->m);			\
+	mtx_unlock(_l);				\
 } while (0)

 #define	spin_unlock_bh(_l) do {			\
 	local_bh_enable();			\
 	spin_unlock(_l);			\
 } while (0)

 #define	spin_unlock_irq(_l) do {		\
 	spin_unlock(_l);			\
 } while (0)

 #define	spin_trylock(_l) ({			\
 	int __ret;				\
 	if (SPIN_SKIP()) {			\
 		__ret = 1;			\
 	} else {				\
-		__ret = mtx_trylock(&(_l)->m);	\
+		__ret = mtx_trylock(_l);	\
 		if (likely(__ret != 0))		\
 			local_bh_disable();	\
 	}					\
 	__ret;					\
 })

 #define	spin_trylock_irq(_l)			\
 	spin_trylock(_l)

 #define	spin_trylock_irqsave(_l, flags) ({	\
 	(flags) = 0;				\
 	spin_trylock(_l);			\
 })

 #define	spin_lock_nested(_l, _n) do {		\
 	if (SPIN_SKIP())			\
 		break;				\
-	mtx_lock_flags(&(_l)->m, MTX_DUPOK);	\
+	mtx_lock_flags(_l, MTX_DUPOK);		\
 	local_bh_disable();			\
 } while (0)

 #define	spin_lock_irqsave(_l, flags) do {	\
 	(flags) = 0;				\
 	spin_lock(_l);				\
 } while (0)

 #define	spin_lock_irqsave_nested(_l, flags, _n) do {	\
 	(flags) = 0;				\
 	spin_lock_nested(_l, _n);		\
 } while (0)

 #define	spin_unlock_irqrestore(_l, flags) do {	\
 	(void)(flags);				\
 	spin_unlock(_l);			\
 } while (0)

 #ifdef WITNESS_ALL
 /* NOTE: the maximum WITNESS name is 64 chars */
 #define	__spin_lock_name(name, file, line)	\
 	(((const char *){file ":" #line "-" name}) +	\
 	(sizeof(file) > 16 ? sizeof(file) - 16 : 0))
 #else
 #define	__spin_lock_name(name, file, line)	name
 #endif
 #define	_spin_lock_name(...)	__spin_lock_name(__VA_ARGS__)
 #define	spin_lock_name(name)	_spin_lock_name(name, __FILE__, __LINE__)

-#define	spin_lock_init(lock)	linux_spin_lock_init(lock, spin_lock_name("lnxspin"))
+#define	spin_lock_init(lock)	mtx_init(lock, spin_lock_name("lnxspin"), \
+				    NULL, MTX_DEF | MTX_NOWITNESS)

-static inline void
-linux_spin_lock_init(spinlock_t *lock, const char *name)
-{
-
-	memset(lock, 0, sizeof(*lock));
-	mtx_init(&lock->m, name, NULL, MTX_DEF | MTX_NOWITNESS);
-}
-
-static inline void
-spin_lock_destroy(spinlock_t *lock)
-{
-
-	mtx_destroy(&lock->m);
-}
+#define	spin_lock_destroy(_l)	mtx_destroy(_l)

 #define	DEFINE_SPINLOCK(lock)					\
 	spinlock_t lock;					\
-	MTX_SYSINIT(lock, &(lock).m, spin_lock_name("lnxspin"), MTX_DEF)
+	MTX_SYSINIT(lock, &lock, spin_lock_name("lnxspin"), MTX_DEF)

 #define	assert_spin_locked(_l) do {		\
 	if (SPIN_SKIP())			\
 		break;				\
-	mtx_assert(&(_l)->m, MA_OWNED);		\
+	mtx_assert(_l, MA_OWNED);		\
 } while (0)

 #define	atomic_dec_and_lock_irqsave(cnt, lock, flags)		\
 	_atomic_dec_and_lock_irqsave(cnt, lock, &(flags))
 static inline int
 _atomic_dec_and_lock_irqsave(atomic_t *cnt, spinlock_t *lock,
     unsigned long *flags)
 {
 	if (atomic_add_unless(cnt, -1, 1))
 		return (0);
 	spin_lock_irqsave(lock, *flags);
 	if (atomic_dec_and_test(cnt))
 		return (1);
 	spin_unlock_irqrestore(lock, *flags);
 	return (0);
 }

 #endif /* _LINUXKPI_LINUX_SPINLOCK_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/wait.h b/sys/compat/linuxkpi/common/include/linux/wait.h
index d50003d44955..b815050b6faa 100644
--- a/sys/compat/linuxkpi/common/include/linux/wait.h
+++ b/sys/compat/linuxkpi/common/include/linux/wait.h
@@ -1,311 +1,311 @@
 /*-
  * Copyright (c) 2010 Isilon Systems, Inc.
  * Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * Copyright (c) 2017 Mark Johnston * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUXKPI_LINUX_WAIT_H_ #define _LINUXKPI_LINUX_WAIT_H_ #include #include #include #include #include #include #include #define SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active) #define might_sleep() \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()") #define might_sleep_if(cond) do { \ if (cond) { might_sleep(); } \ } while (0) struct wait_queue; struct wait_queue_head; #define wait_queue_entry wait_queue typedef struct wait_queue wait_queue_t; typedef struct wait_queue_entry wait_queue_entry_t; typedef struct wait_queue_head wait_queue_head_t; typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *); /* * Many API consumers directly reference these fields and those of * wait_queue_head. */ struct wait_queue { unsigned int flags; /* always 0 */ void *private; wait_queue_func_t *func; union { struct list_head task_list; /* < v4.13 */ struct list_head entry; /* >= v4.13 */ }; }; struct wait_queue_head { spinlock_t lock; union { struct list_head task_list; /* < v4.13 */ struct list_head head; /* >= v4.13 */ }; }; /* * This function is referenced by at least one DRM driver, so it may not be * renamed and furthermore must be the default wait queue callback. 
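[Illustrative sketch, not part of the patch: because the anonymous unions above alias the same struct list_head, driver code written against the pre-4.13 member names and the post-4.13 names manipulates the identical list. The helper names below are hypothetical.]

	static void
	example_enqueue_old(wait_queue_head_t *wqh, wait_queue_t *wq)
	{
		list_add_tail(&wq->task_list, &wqh->task_list);	/* < v4.13 spelling */
	}

	static void
	example_enqueue_new(wait_queue_head_t *wqh, wait_queue_entry_t *wq)
	{
		list_add_tail(&wq->entry, &wqh->head);	/* >= v4.13 spelling, same storage */
	}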
*/ extern wait_queue_func_t autoremove_wake_function; extern wait_queue_func_t default_wake_function; #define DEFINE_WAIT_FUNC(name, function) \ wait_queue_t name = { \ .private = current, \ .func = function, \ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \ } #define DEFINE_WAIT(name) \ DEFINE_WAIT_FUNC(name, autoremove_wake_function) #define DECLARE_WAITQUEUE(name, task) \ wait_queue_t name = { \ .private = task, \ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \ } #define DECLARE_WAIT_QUEUE_HEAD(name) \ wait_queue_head_t name = { \ .task_list = LINUX_LIST_HEAD_INIT(name.task_list), \ }; \ MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF) #define init_waitqueue_head(wqh) do { \ - mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"), \ + mtx_init(&(wqh)->lock, spin_lock_name("wqhead"), \ NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS); \ INIT_LIST_HEAD(&(wqh)->task_list); \ } while (0) #define __init_waitqueue_head(wqh, name, lk) init_waitqueue_head(wqh) void linux_init_wait_entry(wait_queue_t *, int); void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool); #define init_wait_entry(wq, flags) \ linux_init_wait_entry(wq, flags) #define wake_up(wqh) \ linux_wake_up(wqh, TASK_NORMAL, 1, false) #define wake_up_all(wqh) \ linux_wake_up(wqh, TASK_NORMAL, 0, false) #define wake_up_locked(wqh) \ linux_wake_up(wqh, TASK_NORMAL, 1, true) #define wake_up_all_locked(wqh) \ linux_wake_up(wqh, TASK_NORMAL, 0, true) #define wake_up_interruptible(wqh) \ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false) #define wake_up_interruptible_all(wqh) \ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false) int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int, unsigned int, spinlock_t *); /* * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if * cond is true after timeout, remaining jiffies (> 0) if cond is true before * timeout. */ #define __wait_event_common(wqh, cond, timeout, state, lock) ({ \ DEFINE_WAIT(__wq); \ const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout); \ int __start = ticks; \ int __ret = 0; \ \ for (;;) { \ linux_prepare_to_wait(&(wqh), &__wq, state); \ if (cond) \ break; \ __ret = linux_wait_event_common(&(wqh), &__wq, \ __timeout, state, lock); \ if (__ret != 0) \ break; \ } \ linux_finish_wait(&(wqh), &__wq); \ if (__timeout != MAX_SCHEDULE_TIMEOUT) { \ if (__ret == -EWOULDBLOCK) \ __ret = !!(cond); \ else if (__ret != -ERESTARTSYS) { \ __ret = __timeout + __start - ticks; \ /* range check return value */ \ if (__ret < 1) \ __ret = 1; \ else if (__ret > __timeout) \ __ret = __timeout; \ } \ } \ __ret; \ }) #define wait_event(wqh, cond) do { \ (void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_UNINTERRUPTIBLE, NULL); \ } while (0) #define wait_event_timeout(wqh, cond, timeout) ({ \ __wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE, \ NULL); \ }) #define wait_event_killable(wqh, cond) ({ \ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_INTERRUPTIBLE, NULL); \ }) #define wait_event_interruptible(wqh, cond) ({ \ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_INTERRUPTIBLE, NULL); \ }) #define wait_event_interruptible_timeout(wqh, cond, timeout) ({ \ __wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE, \ NULL); \ }) /* * Wait queue is already locked. 
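[Illustrative sketch, not part of the patch: a minimal waiter/waker pairing of the macros above, with hypothetical names. A real consumer would pick synchronization appropriate to its condition; an atomic_t is used here only to keep the sketch self-contained.]

	static wait_queue_head_t example_wqh;
	static atomic_t example_ready;

	static void
	example_setup(void)
	{
		init_waitqueue_head(&example_wqh);	/* mtx_init() on the embedded lock */
		atomic_set(&example_ready, 0);
	}

	static int
	example_waiter(void)
	{
		/* Returns 0 once the condition holds, -ERESTARTSYS if a signal arrives first. */
		return (wait_event_interruptible(example_wqh,
		    atomic_read(&example_ready) != 0));
	}

	static void
	example_waker(void)
	{
		atomic_set(&example_ready, 1);
		wake_up(&example_wqh);			/* wakes one TASK_NORMAL waiter */
	}

The condition is re-tested after linux_prepare_to_wait() queues the entry, so setting the flag before wake_up() avoids lost wakeups.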
*/ #define wait_event_interruptible_locked(wqh, cond) ({ \ int __ret; \ \ spin_unlock(&(wqh).lock); \ __ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_INTERRUPTIBLE, NULL); \ spin_lock(&(wqh).lock); \ __ret; \ }) /* * The passed spinlock is held when testing the condition. */ #define wait_event_interruptible_lock_irq(wqh, cond, lock) ({ \ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_INTERRUPTIBLE, &(lock)); \ }) /* * The passed spinlock is held when testing the condition. */ #define wait_event_lock_irq(wqh, cond, lock) ({ \ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_UNINTERRUPTIBLE, &(lock)); \ }) static inline void __add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) { list_add(&wq->task_list, &wqh->task_list); } static inline void add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) { spin_lock(&wqh->lock); __add_wait_queue(wqh, wq); spin_unlock(&wqh->lock); } static inline void __add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq) { list_add_tail(&wq->task_list, &wqh->task_list); } static inline void __add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wq) { list_add_tail(&wq->entry, &wqh->head); } static inline void __remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) { list_del(&wq->task_list); } static inline void remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) { spin_lock(&wqh->lock); __remove_wait_queue(wqh, wq); spin_unlock(&wqh->lock); } bool linux_waitqueue_active(wait_queue_head_t *); #define waitqueue_active(wqh) linux_waitqueue_active(wqh) void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int); void linux_finish_wait(wait_queue_head_t *, wait_queue_t *); #define prepare_to_wait(wqh, wq, state) linux_prepare_to_wait(wqh, wq, state) #define finish_wait(wqh, wq) linux_finish_wait(wqh, wq) void linux_wake_up_bit(void *, int); int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int); void linux_wake_up_atomic_t(atomic_t *); int linux_wait_on_atomic_t(atomic_t *, unsigned int); #define wake_up_bit(word, bit) linux_wake_up_bit(word, bit) #define wait_on_bit(word, bit, state) \ linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT) #define wait_on_bit_timeout(word, bit, state, timeout) \ linux_wait_on_bit_timeout(word, bit, state, timeout) #define wake_up_atomic_t(a) linux_wake_up_atomic_t(a) /* * All existing callers have a cb that just schedule()s. To avoid adding * complexity, just emulate that internally. The prototype is different so that * callers must be manually modified; a cb that does something other than call * schedule() will require special treatment. */ #define wait_on_atomic_t(a, state) linux_wait_on_atomic_t(a, state) struct task_struct; bool linux_wake_up_state(struct task_struct *, unsigned int); #define wake_up_process(task) linux_wake_up_state(task, TASK_NORMAL) #define wake_up_state(task, state) linux_wake_up_state(task, state) #endif /* _LINUXKPI_LINUX_WAIT_H_ */ diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c index a6eb7bb17e16..4c639c1f8459 100644 --- a/sys/compat/linuxkpi/common/src/linux_compat.c +++ b/sys/compat/linuxkpi/common/src/linux_compat.c @@ -1,2824 +1,2824 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_global.h" #include "opt_stack.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #ifdef XENHVM #undef xen_pv_domain #undef xen_initial_domain /* xen/xen-os.h redefines __must_check */ #undef __must_check #include #endif SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "LinuxKPI parameters"); int linuxkpi_debug; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); int linuxkpi_warn_dump_stack = 0; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, &linuxkpi_warn_dump_stack, 0, "Set to enable stack traces from WARN_ON(). Clear to disable."); static struct timeval lkpi_net_lastlog; static int lkpi_net_curpps; static int lkpi_net_maxpps = 99; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); #include /* Undo Linux compat changes. 
*/ #undef RB_ROOT #undef file #undef cdev #define RB_ROOT(head) (head)->rbh_root static void linux_destroy_dev(struct linux_cdev *); static void linux_cdev_deref(struct linux_cdev *ldev); static struct vm_area_struct *linux_cdev_handle_find(void *handle); cpumask_t cpu_online_mask; static cpumask_t **static_single_cpu_mask; static cpumask_t *static_single_cpu_mask_lcs; struct kobject linux_class_root; struct device linux_root_device; struct class linux_class_misc; struct list_head pci_drivers; struct list_head pci_devices; spinlock_t pci_lock; struct uts_namespace init_uts_ns; unsigned long linux_timer_hz_mask; wait_queue_head_t linux_bit_waitq; wait_queue_head_t linux_var_waitq; int panic_cmp(struct rb_node *one, struct rb_node *two) { panic("no cmp"); } RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); #define START(node) ((node)->start) #define LAST(node) ((node)->last) INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, LAST,, lkpi_interval_tree) static void linux_device_release(struct device *dev) { pr_debug("linux_device_release: %s\n", dev_name(dev)); kfree(dev); } static ssize_t linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct class, kobj), dattr, buf); return (error); } static ssize_t linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct class, kobj), dattr, buf, count); return (error); } static void linux_class_release(struct kobject *kobj) { struct class *class; class = container_of(kobj, struct class, kobj); if (class->class_release) class->class_release(class); } static const struct sysfs_ops linux_class_sysfs = { .show = linux_class_show, .store = linux_class_store, }; const struct kobj_type linux_class_ktype = { .release = linux_class_release, .sysfs_ops = &linux_class_sysfs }; static void linux_dev_release(struct kobject *kobj) { struct device *dev; dev = container_of(kobj, struct device, kobj); /* This is the precedence defined by linux. */ if (dev->release) dev->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); } static ssize_t linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct device, kobj), dattr, buf); return (error); } static ssize_t linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct device, kobj), dattr, buf, count); return (error); } static const struct sysfs_ops linux_dev_sysfs = { .show = linux_dev_show, .store = linux_dev_store, }; const struct kobj_type linux_dev_ktype = { .release = linux_dev_release, .sysfs_ops = &linux_dev_sysfs }; struct device * device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) 
{ struct device *dev; va_list args; dev = kzalloc(sizeof(*dev), M_WAITOK); dev->parent = parent; dev->class = class; dev->devt = devt; dev->driver_data = drvdata; dev->release = linux_device_release; va_start(args, fmt); kobject_set_name_vargs(&dev->kobj, fmt, args); va_end(args); device_register(dev); return (dev); } struct device * device_create_groups_vargs(struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args) { struct device *dev = NULL; int retval = -ENODEV; if (class == NULL || IS_ERR(class)) goto error; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto error; } dev->devt = devt; dev->class = class; dev->parent = parent; dev->groups = groups; dev->release = device_create_release; /* device_initialize() needs the class and parent to be set */ device_initialize(dev); dev_set_drvdata(dev, drvdata); retval = kobject_set_name_vargs(&dev->kobj, fmt, args); if (retval) goto error; retval = device_add(dev); if (retval) goto error; return dev; error: put_device(dev); return ERR_PTR(retval); } struct class * class_create(struct module *owner, const char *name) { struct class *class; int error; class = kzalloc(sizeof(*class), M_WAITOK); class->owner = owner; class->name = name; class->class_release = linux_class_kfree; error = class_register(class); if (error) { kfree(class); return (NULL); } return (class); } static void linux_kq_lock(void *arg) { spinlock_t *s = arg; spin_lock(s); } static void linux_kq_unlock(void *arg) { spinlock_t *s = arg; spin_unlock(s); } static void linux_kq_assert_lock(void *arg, int what) { #ifdef INVARIANTS spinlock_t *s = arg; if (what == LA_LOCKED) - mtx_assert(&s->m, MA_OWNED); + mtx_assert(s, MA_OWNED); else - mtx_assert(&s->m, MA_NOTOWNED); + mtx_assert(s, MA_NOTOWNED); #endif } static void linux_file_kqfilter_poll(struct linux_file *, int); struct linux_file * linux_file_alloc(void) { struct linux_file *filp; filp = kzalloc(sizeof(*filp), GFP_KERNEL); /* set initial refcount */ filp->f_count = 1; /* setup fields needed by kqueue support */ spin_lock_init(&filp->f_kqlock); knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); return (filp); } void linux_file_free(struct linux_file *filp) { if (filp->_file == NULL) { if (filp->f_op != NULL && filp->f_op->release != NULL) filp->f_op->release(filp->f_vnode, filp); if (filp->f_shmem != NULL) vm_object_deallocate(filp->f_shmem); kfree_rcu(filp, rcu); } else { /* * The close method of the character device or file * will free the linux_file structure: */ _fdrop(filp->_file, curthread); } } struct linux_cdev * cdev_alloc(void) { struct linux_cdev *cdev; cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); kobject_init(&cdev->kobj, &linux_cdev_ktype); cdev->refs = 1; return (cdev); } static int linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; vm_page_t page; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake * page, update it with the new physical * address. 
*/ page = *mres; vm_page_updatefake(page, paddr, vm_obj->memattr); } else { /* * Replace the passed in "mres" page with our * own fake page and free up the all of the * original pages. */ VM_OBJECT_WUNLOCK(vm_obj); page = vm_page_getfake(paddr, vm_obj->memattr); VM_OBJECT_WLOCK(vm_obj); vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); *mres = page; } vm_page_valid(page); return (VM_PAGER_OK); } return (VM_PAGER_FAIL); } static int linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) { struct vm_area_struct *vmap; int err; /* get VM area structure */ vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); VM_OBJECT_WUNLOCK(vm_obj); linux_set_current(curthread); down_write(&vmap->vm_mm->mmap_sem); if (unlikely(vmap->vm_ops == NULL)) { err = VM_FAULT_SIGBUS; } else { struct vm_fault vmf; /* fill out VM fault structure */ vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; vmf.pgoff = 0; vmf.page = NULL; vmf.vma = vmap; vmap->vm_pfn_count = 0; vmap->vm_pfn_pcount = &vmap->vm_pfn_count; vmap->vm_obj = vm_obj; err = vmap->vm_ops->fault(&vmf); while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { kern_yield(PRI_USER); err = vmap->vm_ops->fault(&vmf); } } /* translate return code */ switch (err) { case VM_FAULT_OOM: err = VM_PAGER_AGAIN; break; case VM_FAULT_SIGBUS: err = VM_PAGER_BAD; break; case VM_FAULT_NOPAGE: /* * By contract the fault handler will return having * busied all the pages itself. If pidx is already * found in the object, it will simply xbusy the first * page and return with vm_pfn_count set to 1. */ *first = vmap->vm_pfn_first; *last = *first + vmap->vm_pfn_count - 1; err = VM_PAGER_OK; break; default: err = VM_PAGER_ERROR; break; } up_write(&vmap->vm_mm->mmap_sem); VM_OBJECT_WLOCK(vm_obj); return (err); } static struct rwlock linux_vma_lock; static TAILQ_HEAD(, vm_area_struct) linux_vma_head = TAILQ_HEAD_INITIALIZER(linux_vma_head); static void linux_cdev_handle_free(struct vm_area_struct *vmap) { /* Drop reference on vm_file */ if (vmap->vm_file != NULL) fput(vmap->vm_file); /* Drop reference on mm_struct */ mmput(vmap->vm_mm); kfree(vmap); } static void linux_cdev_handle_remove(struct vm_area_struct *vmap) { rw_wlock(&linux_vma_lock); TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); rw_wunlock(&linux_vma_lock); } static struct vm_area_struct * linux_cdev_handle_find(void *handle) { struct vm_area_struct *vmap; rw_rlock(&linux_vma_lock); TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { if (vmap->vm_private_data == handle) break; } rw_runlock(&linux_vma_lock); return (vmap); } static int linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { MPASS(linux_cdev_handle_find(handle) != NULL); *color = 0; return (0); } static void linux_cdev_pager_dtor(void *handle) { const struct vm_operations_struct *vm_ops; struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(handle); MPASS(vmap != NULL); /* * Remove handle before calling close operation to prevent * other threads from reusing the handle pointer. 
*/ linux_cdev_handle_remove(vmap); down_write(&vmap->vm_mm->mmap_sem); vm_ops = vmap->vm_ops; if (likely(vm_ops != NULL)) vm_ops->close(vmap); up_write(&vmap->vm_mm->mmap_sem); linux_cdev_handle_free(vmap); } static struct cdev_pager_ops linux_cdev_pager_ops[2] = { { /* OBJT_MGTDEVICE */ .cdev_pg_populate = linux_cdev_pager_populate, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, { /* OBJT_DEVICE */ .cdev_pg_fault = linux_cdev_pager_fault, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, }; int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) { vm_object_t obj; vm_page_t m; obj = vma->vm_obj; if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) return (-ENOTSUP); VM_OBJECT_RLOCK(obj); for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); m != NULL && m->pindex < OFF_TO_IDX(address + size); m = TAILQ_NEXT(m, listq)) pmap_remove_all(m); VM_OBJECT_RUNLOCK(obj); return (0); } void vma_set_file(struct vm_area_struct *vma, struct linux_file *file) { struct linux_file *tmp; /* Changing an anonymous vma with this is illegal */ get_file(file); tmp = vma->vm_file; vma->vm_file = file; fput(tmp); } static struct file_operations dummy_ldev_ops = { /* XXXKIB */ }; static struct linux_cdev dummy_ldev = { .ops = &dummy_ldev_ops, }; #define LDEV_SI_DTR 0x0001 #define LDEV_SI_REF 0x0002 static void linux_get_fop(struct linux_file *filp, const struct file_operations **fop, struct linux_cdev **dev) { struct linux_cdev *ldev; u_int siref; ldev = filp->f_cdev; *fop = filp->f_op; if (ldev != NULL) { if (ldev->kobj.ktype == &linux_cdev_static_ktype) { refcount_acquire(&ldev->refs); } else { for (siref = ldev->siref;;) { if ((siref & LDEV_SI_DTR) != 0) { ldev = &dummy_ldev; *fop = ldev->ops; siref = ldev->siref; MPASS((ldev->siref & LDEV_SI_DTR) == 0); } else if (atomic_fcmpset_int(&ldev->siref, &siref, siref + LDEV_SI_REF)) { break; } } } } *dev = ldev; } static void linux_drop_fop(struct linux_cdev *ldev) { if (ldev == NULL) return; if (ldev->kobj.ktype == &linux_cdev_static_ktype) { linux_cdev_deref(ldev); } else { MPASS(ldev->kobj.ktype == &linux_cdev_ktype); MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); atomic_subtract_int(&ldev->siref, LDEV_SI_REF); } } #define OPW(fp,td,code) ({ \ struct file *__fpop; \ __typeof(code) __retval; \ \ __fpop = (td)->td_fpop; \ (td)->td_fpop = (fp); \ __retval = (code); \ (td)->td_fpop = __fpop; \ __retval; \ }) static int linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file) { struct linux_cdev *ldev; struct linux_file *filp; const struct file_operations *fop; int error; ldev = dev->si_drv1; filp = linux_file_alloc(); filp->f_dentry = &filp->f_dentry_store; filp->f_op = ldev->ops; filp->f_mode = file->f_flag; filp->f_flags = file->f_flag; filp->f_vnode = file->f_vnode; filp->_file = file; refcount_acquire(&ldev->refs); filp->f_cdev = ldev; linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->open != NULL) { error = -fop->open(file->f_vnode, filp); if (error != 0) { linux_drop_fop(ldev); linux_cdev_deref(filp->f_cdev); kfree(filp); return (error); } } /* hold on to the vnode - used for fstat() */ vhold(filp->f_vnode); /* release the file from devfs */ finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); linux_drop_fop(ldev); return (ENXIO); } #define LINUX_IOCTL_MIN_PTR 0x10000UL #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) static inline int linux_remap_address(void **uaddr, size_t len) { uintptr_t uaddr_val = 
(uintptr_t)(*uaddr); if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && uaddr_val < LINUX_IOCTL_MAX_PTR)) { struct task_struct *pts = current; if (pts == NULL) { *uaddr = NULL; return (1); } /* compute data offset */ uaddr_val -= LINUX_IOCTL_MIN_PTR; /* check that length is within bounds */ if ((len > IOCPARM_MAX) || (uaddr_val + len) > pts->bsd_ioctl_len) { *uaddr = NULL; return (1); } /* re-add kernel buffer address */ uaddr_val += (uintptr_t)pts->bsd_ioctl_data; /* update address location */ *uaddr = (void *)uaddr_val; return (1); } return (0); } int linux_copyin(const void *uaddr, void *kaddr, size_t len) { if (linux_remap_address(__DECONST(void **, &uaddr), len)) { if (uaddr == NULL) return (-EFAULT); memcpy(kaddr, uaddr, len); return (0); } return (-copyin(uaddr, kaddr, len)); } int linux_copyout(const void *kaddr, void *uaddr, size_t len) { if (linux_remap_address(&uaddr, len)) { if (uaddr == NULL) return (-EFAULT); memcpy(uaddr, kaddr, len); return (0); } return (-copyout(kaddr, uaddr, len)); } size_t linux_clear_user(void *_uaddr, size_t _len) { uint8_t *uaddr = _uaddr; size_t len = _len; /* make sure uaddr is aligned before going into the fast loop */ while (((uintptr_t)uaddr & 7) != 0 && len > 7) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } /* zero 8 bytes at a time */ while (len > 7) { #ifdef __LP64__ if (suword64(uaddr, 0)) return (_len); #else if (suword32(uaddr, 0)) return (_len); if (suword32(uaddr + 4, 0)) return (_len); #endif uaddr += 8; len -= 8; } /* zero fill end, if any */ while (len > 0) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } return (0); } int linux_access_ok(const void *uaddr, size_t len) { uintptr_t saddr; uintptr_t eaddr; /* get start and end address */ saddr = (uintptr_t)uaddr; eaddr = (uintptr_t)uaddr + len; /* verify addresses are valid for userspace */ return ((saddr == eaddr) || (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); } /* * This function should return either EINTR or ERESTART depending on * the signal type sent to this thread: */ static int linux_get_error(struct task_struct *task, int error) { /* check for signal type interrupt code */ if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { error = -linux_schedule_get_interrupt_value(task); if (error == 0) error = EINTR; } return (error); } static int linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, const struct file_operations *fop, u_long cmd, caddr_t data, struct thread *td) { struct task_struct *task = current; unsigned size; int error; size = IOCPARM_LEN(cmd); /* refer to logic in sys_ioctl() */ if (size > 0) { /* * Setup hint for linux_copyin() and linux_copyout(). * * Background: Linux code expects a user-space address * while FreeBSD supplies a kernel-space address. 
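[Illustrative sketch, not part of the patch: the driver-side view of this remapping, assuming the usual LinuxKPI mapping of copy_from_user()/copy_to_user() onto linux_copyin()/linux_copyout(); all names below are hypothetical. The `arg` the driver receives is the LINUX_IOCTL_MIN_PTR-based cookie set up in this function, so the copies are redirected into the kernel buffer devfs already copied in.]

	struct example_args {
		uint32_t in;
		uint32_t out;
	};

	static long
	example_unlocked_ioctl(struct linux_file *filp, unsigned int cmd,
	    unsigned long arg)
	{
		struct example_args a;

		/*
		 * 'arg' lies in the LINUX_IOCTL_MIN_PTR window, so this ends up
		 * as a memcpy() from current->bsd_ioctl_data, not a real copyin().
		 */
		if (copy_from_user(&a, (void *)arg, sizeof(a)))
			return (-EFAULT);
		a.out = a.in + 1;
		if (copy_to_user((void *)arg, &a, sizeof(a)))
			return (-EFAULT);
		return (0);
	}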
*/ task->bsd_ioctl_data = data; task->bsd_ioctl_len = size; data = (void *)LINUX_IOCTL_MIN_PTR; } else { /* fetch user-space pointer */ data = *(void **)data; } #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { /* try the compat IOCTL handler first */ if (fop->compat_ioctl != NULL) { error = -OPW(fp, td, fop->compat_ioctl(filp, cmd, (u_long)data)); } else { error = ENOTTY; } /* fallback to the regular IOCTL handler, if any */ if (error == ENOTTY && fop->unlocked_ioctl != NULL) { error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, (u_long)data)); } } else #endif { if (fop->unlocked_ioctl != NULL) { error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, (u_long)data)); } else { error = ENOTTY; } } if (size > 0) { task->bsd_ioctl_data = NULL; task->bsd_ioctl_len = 0; } if (error == EWOULDBLOCK) { /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } else { error = linux_get_error(task, error); } return (error); } #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) /* * This function atomically updates the poll wakeup state and returns * the previous state at the time of update. */ static uint8_t linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) { int c, old; c = v->counter; while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) c = old; return (c); } static int linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ }; struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_QUEUED: linux_poll_wakeup(filp); return (1); default: return (0); } } void linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, }; /* check if we are called inside the select system call */ if (p == LINUX_POLL_TABLE_NORMAL) selrecord(curthread, &filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_INIT: /* NOTE: file handles can only belong to one wait-queue */ filp->f_wait_queue.wqh = wqh; filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; add_wait_queue(wqh, &filp->f_wait_queue.wq); atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); break; default: break; } } static void linux_poll_wait_dequeue(struct linux_file *filp) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, }; seldrain(&filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_NOT_READY: case LINUX_FWQ_STATE_QUEUED: case LINUX_FWQ_STATE_READY: remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); break; default: break; } } void linux_poll_wakeup(struct linux_file *filp) { /* this function should be 
NULL-safe */ if (filp == NULL) return; selwakeup(&filp->f_selinfo); spin_lock(&filp->f_kqlock); filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); spin_unlock(&filp->f_kqlock); } static void linux_file_kqfilter_detach(struct knote *kn) { struct linux_file *filp = kn->kn_hook; spin_lock(&filp->f_kqlock); knlist_remove(&filp->f_selinfo.si_note, kn, 1); spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter_read_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; - mtx_assert(&filp->f_kqlock.m, MA_OWNED); + mtx_assert(&filp->f_kqlock, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); } static int linux_file_kqfilter_write_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; - mtx_assert(&filp->f_kqlock.m, MA_OWNED); + mtx_assert(&filp->f_kqlock, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); } static struct filterops linux_dev_kqfiltops_read = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_read_event, }; static struct filterops linux_dev_kqfiltops_write = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_write_event, }; static void linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) { struct thread *td; const struct file_operations *fop; struct linux_cdev *ldev; int temp; if ((filp->f_kqflags & kqflags) == 0) return; td = curthread; linux_get_fop(filp, &fop, &ldev); /* get the latest polling state */ temp = OPW(filp->_file, td, fop->poll(filp, NULL)); linux_drop_fop(ldev); spin_lock(&filp->f_kqlock); /* clear kqflags */ filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE); /* update kqflags */ if ((temp & (POLLIN | POLLOUT)) != 0) { if ((temp & POLLIN) != 0) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; if ((temp & POLLOUT) != 0) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); } spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter(struct file *file, struct knote *kn) { struct linux_file *filp; struct thread *td; int error; td = curthread; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; if (filp->f_op->poll == NULL) return (EINVAL); spin_lock(&filp->f_kqlock); switch (kn->kn_filter) { case EVFILT_READ: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; kn->kn_fop = &linux_dev_kqfiltops_read; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; case EVFILT_WRITE: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; kn->kn_fop = &linux_dev_kqfiltops_write; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; default: error = EINVAL; break; } spin_unlock(&filp->f_kqlock); if (error == 0) { linux_set_current(td); /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } return (error); } static int linux_file_mmap_single(struct file *fp, const struct file_operations *fop, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot, bool is_shared, struct thread *td) { struct task_struct *task; struct vm_area_struct *vmap; struct mm_struct *mm; struct linux_file *filp; vm_memattr_t attr; int error; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; if (fop->mmap == NULL) return (EOPNOTSUPP); linux_set_current(td); /* * The same VM object 
might be shared by multiple processes * and the mm_struct is usually freed when a process exits. * * The atomic reference below makes sure the mm_struct is * available as long as the vmap is in the linux_vma_head. */ task = current; mm = task->mm; if (atomic_inc_not_zero(&mm->mm_users) == 0) return (EINVAL); vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); vmap->vm_start = 0; vmap->vm_end = size; vmap->vm_pgoff = *offset / PAGE_SIZE; vmap->vm_pfn = 0; vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); if (is_shared) vmap->vm_flags |= VM_SHARED; vmap->vm_ops = NULL; vmap->vm_file = get_file(filp); vmap->vm_mm = mm; if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { error = linux_get_error(task, EINTR); } else { error = -OPW(fp, td, fop->mmap(filp, vmap)); error = linux_get_error(task, error); up_write(&vmap->vm_mm->mmap_sem); } if (error != 0) { linux_cdev_handle_free(vmap); return (error); } attr = pgprot2cachemode(vmap->vm_page_prot); if (vmap->vm_ops != NULL) { struct vm_area_struct *ptr; void *vm_private_data; bool vm_no_fault; if (vmap->vm_ops->open == NULL || vmap->vm_ops->close == NULL || vmap->vm_private_data == NULL) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); return (EINVAL); } vm_private_data = vmap->vm_private_data; rw_wlock(&linux_vma_lock); TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { if (ptr->vm_private_data == vm_private_data) break; } /* check if there is an existing VM area struct */ if (ptr != NULL) { /* check if the VM area structure is invalid */ if (ptr->vm_ops == NULL || ptr->vm_ops->open == NULL || ptr->vm_ops->close == NULL) { error = ESTALE; vm_no_fault = 1; } else { error = EEXIST; vm_no_fault = (ptr->vm_ops->fault == NULL); } } else { /* insert VM area structure into list */ TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); error = 0; vm_no_fault = (vmap->vm_ops->fault == NULL); } rw_wunlock(&linux_vma_lock); if (error != 0) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); /* check for stale VM area struct */ if (error != EEXIST) return (error); } /* check if there is no fault handler */ if (vm_no_fault) { *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, &linux_cdev_pager_ops[1], size, nprot, *offset, td->td_ucred); } else { *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, &linux_cdev_pager_ops[0], size, nprot, *offset, td->td_ucred); } /* check if allocating the VM object failed */ if (*object == NULL) { if (error == 0) { /* remove VM area struct from list */ linux_cdev_handle_remove(vmap); /* free allocated VM area struct */ linux_cdev_handle_free(vmap); } return (EINVAL); } } else { struct sglist *sg; sg = sglist_alloc(1, M_WAITOK); sglist_append_phys(sg, (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, nprot, 0, td->td_ucred); linux_cdev_handle_free(vmap); if (*object == NULL) { sglist_free(sg); return (EINVAL); } } if (attr != VM_MEMATTR_DEFAULT) { VM_OBJECT_WLOCK(*object); vm_object_set_memattr(*object, attr); VM_OBJECT_WUNLOCK(*object); } *offset = 0; return (0); } struct cdevsw linuxcdevsw = { .d_version = D_VERSION, .d_fdopen = linux_dev_fdopen, .d_name = "lkpidev", }; static int linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; ssize_t bytes; int error; error = 0; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O 
vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->read != NULL) { bytes = OPW(file, td, fop->read(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); linux_drop_fop(ldev); return (error); } static int linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; ssize_t bytes; int error; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->write != NULL) { bytes = OPW(file, td, fop->write(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; error = 0; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); linux_drop_fop(ldev); return (error); } static int linux_file_poll(struct file *file, int events, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; int revents; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->poll != NULL) { revents = OPW(file, td, fop->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events; } else { revents = 0; } linux_drop_fop(ldev); return (revents); } static int linux_file_close(struct file *file, struct thread *td) { struct linux_file *filp; int (*release)(struct inode *, struct linux_file *); const struct file_operations *fop; struct linux_cdev *ldev; int error; filp = (struct linux_file *)file->f_data; KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp))); if (td == NULL) td = curthread; error = 0; filp->f_flags = file->f_flag; linux_set_current(td); linux_poll_wait_dequeue(filp); linux_get_fop(filp, &fop, &ldev); /* * Always use the real release function, if any, to avoid * leaking device resources: */ release = filp->f_op->release; if (release != NULL) error = -OPW(file, td, release(filp->f_vnode, filp)); funsetown(&filp->f_sigio); if (filp->f_vnode != NULL) vdrop(filp->f_vnode); linux_drop_fop(ldev); ldev = filp->f_cdev; if (ldev != NULL) linux_cdev_deref(ldev); linux_synchronize_rcu(RCU_TYPE_REGULAR); kfree(filp); return (error); } static int linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; struct fiodgname_arg *fgn; const char *p; int error, i; error = 0; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; linux_get_fop(filp, &fop, &ldev); linux_set_current(td); switch (cmd) { case FIONBIO: break; case FIOASYNC: if 
(fop->fasync == NULL) break; error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); break; case FIOSETOWN: error = fsetown(*(int *)data, &filp->f_sigio); if (error == 0) { if (fop->fasync == NULL) break; error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); } break; case FIOGETOWN: *(int *)data = fgetown(&filp->f_sigio); break; case FIODGNAME: #ifdef COMPAT_FREEBSD32 case FIODGNAME_32: #endif if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { error = ENXIO; break; } fgn = data; p = devtoname(filp->f_cdev->cdev); i = strlen(p) + 1; if (i > fgn->len) { error = EINVAL; break; } error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); break; default: error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); break; } linux_drop_fop(ldev); return (error); } static int linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, vm_prot_t maxprot, int flags, struct file *fp, vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) { /* * Character devices do not provide private mappings * of any kind: */ if ((maxprot & VM_PROT_WRITE) == 0 && (prot & VM_PROT_WRITE) != 0) return (EACCES); if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) return (EINVAL); return (linux_file_mmap_single(fp, fop, foff, objsize, objp, (int)prot, (flags & MAP_SHARED) ? true : false, td)); } static int linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; struct mount *mp; struct vnode *vp; vm_object_t object; vm_prot_t maxprot; int error; filp = (struct linux_file *)fp->f_data; vp = filp->f_vnode; if (vp == NULL) return (EOPNOTSUPP); /* * Ensure that file and memory protections are * compatible. */ mp = vp->v_mount; if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { maxprot = VM_PROT_NONE; if ((prot & VM_PROT_EXECUTE) != 0) return (EACCES); } else maxprot = VM_PROT_EXECUTE; if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_READ; else if ((prot & VM_PROT_READ) != 0) return (EACCES); /* * If we are sharing potential changes via MAP_SHARED and we * are trying to get write permission although we opened it * without asking for it, bail out. * * Note that most character devices always share mappings. * * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE * requests rather than doing it here. 
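[Illustrative sketch, not part of the patch: what these checks mean from userspace, using a hypothetical device path. A descriptor opened read-only cannot obtain a writable shared mapping, and private mappings are refused outright by linux_file_mmap_sub().]

	#include <sys/mman.h>
	#include <fcntl.h>
	#include <stddef.h>
	#include <unistd.h>

	static void
	example_mmap_checks(size_t len)
	{
		int fd = open("/dev/lkpi_example", O_RDONLY);

		/* Fails with EACCES: PROT_WRITE requested but FWRITE is not set on fd. */
		(void)mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

		/* Fails with EINVAL: MAP_PRIVATE is rejected by linux_file_mmap_sub(). */
		(void)mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);

		close(fd);
	}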
*/ if ((flags & MAP_SHARED) != 0) { if ((fp->f_flag & FWRITE) != 0) maxprot |= VM_PROT_WRITE; else if ((prot & VM_PROT_WRITE) != 0) return (EACCES); } maxprot &= cap_maxprot; linux_get_fop(filp, &fop, &ldev); error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, &foff, fop, &object); if (error != 0) goto out; error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, foff, FALSE, td); if (error != 0) vm_object_deallocate(object); out: linux_drop_fop(ldev); return (error); } static int linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) { struct linux_file *filp; struct vnode *vp; int error; filp = (struct linux_file *)fp->f_data; if (filp->f_vnode == NULL) return (EOPNOTSUPP); vp = filp->f_vnode; vn_lock(vp, LK_SHARED | LK_RETRY); error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); VOP_UNLOCK(vp); return (error); } static int linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { struct linux_file *filp; struct vnode *vp; int error; filp = fp->f_data; vp = filp->f_vnode; if (vp == NULL) { error = 0; kif->kf_type = KF_TYPE_DEV; } else { vref(vp); FILEDESC_SUNLOCK(fdp); error = vn_fill_kinfo_vnode(vp, kif); vrele(vp); kif->kf_type = KF_TYPE_VNODE; FILEDESC_SLOCK(fdp); } return (error); } unsigned int linux_iminor(struct inode *inode) { struct linux_cdev *ldev; if (inode == NULL || inode->v_rdev == NULL || inode->v_rdev->si_devsw != &linuxcdevsw) return (-1U); ldev = inode->v_rdev->si_drv1; if (ldev == NULL) return (-1U); return (minor(ldev->dev)); } static int linux_file_kcmp(struct file *fp1, struct file *fp2, struct thread *td) { struct linux_file *filp1, *filp2; if (fp2->f_type != DTYPE_DEV) return (3); filp1 = fp1->f_data; filp2 = fp2->f_data; return (kcmp_cmp((uintptr_t)filp1->f_cdev, (uintptr_t)filp2->f_cdev)); } struct fileops linuxfileops = { .fo_read = linux_file_read, .fo_write = linux_file_write, .fo_truncate = invfo_truncate, .fo_kqfilter = linux_file_kqfilter, .fo_stat = linux_file_stat, .fo_fill_kinfo = linux_file_fill_kinfo, .fo_poll = linux_file_poll, .fo_close = linux_file_close, .fo_ioctl = linux_file_ioctl, .fo_mmap = linux_file_mmap, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_cmp = linux_file_kcmp, .fo_flags = DFLAG_PASSABLE, }; /* * Hash of vmmap addresses. This is infrequently accessed and does not * need to be particularly large. This is done because we must store the * caller's idea of the map size to properly unmap. 
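[Illustrative sketch, not part of the patch: the driver pattern this bookkeeping serves, with hypothetical names and assuming the usual ioremap() wrapper around _ioremap_attr(). iounmap() receives only the address, so the size given to ioremap() must be recorded here for pmap_unmapdev()/kva_free().]

	static void *example_regs;

	static int
	example_map_bar(vm_paddr_t bar_pa, unsigned long bar_len)
	{
		example_regs = ioremap(bar_pa, bar_len);	/* vmmap_add(addr, bar_len) */
		return (example_regs == NULL ? -ENOMEM : 0);
	}

	static void
	example_unmap_bar(void)
	{
		iounmap(example_regs);	/* vmmap_remove() recalls bar_len for the unmap */
	}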
*/ struct vmmap { LIST_ENTRY(vmmap) vm_next; void *vm_addr; unsigned long vm_size; }; struct vmmaphd { struct vmmap *lh_first; }; #define VMMAP_HASH_SIZE 64 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; static struct mtx vmmaplock; static void vmmap_add(void *addr, unsigned long size) { struct vmmap *vmmap; vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); mtx_lock(&vmmaplock); vmmap->vm_size = size; vmmap->vm_addr = addr; LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); mtx_unlock(&vmmaplock); } static struct vmmap * vmmap_remove(void *addr) { struct vmmap *vmmap; mtx_lock(&vmmaplock); LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) if (vmmap->vm_addr == addr) break; if (vmmap) LIST_REMOVE(vmmap, vm_next); mtx_unlock(&vmmaplock); return (vmmap); } #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) void * _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) { void *addr; addr = pmap_mapdev_attr(phys_addr, size, attr); if (addr == NULL) return (NULL); vmmap_add(addr, size); return (addr); } #endif void iounmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) pmap_unmapdev(addr, vmmap->vm_size); #endif kfree(vmmap); } void * vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) { vm_offset_t off; size_t size; size = count * PAGE_SIZE; off = kva_alloc(size); if (off == 0) return (NULL); vmmap_add((void *)off, size); pmap_qenter(off, pages, count); return ((void *)off); } void vunmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); kva_free((vm_offset_t)addr, vmmap->vm_size); kfree(vmmap); } static char * devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); if (dev != NULL) p = devm_kmalloc(dev, len + 1, gfp); else p = kmalloc(len + 1, gfp); if (p != NULL) vsnprintf(p, len + 1, fmt, ap); return (p); } char * kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { return (devm_kvasprintf(NULL, gfp, fmt, ap)); } char * lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) { va_list ap; char *p; va_start(ap, fmt); p = devm_kvasprintf(dev, gfp, fmt, ap); va_end(ap); return (p); } char * kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; char *p; va_start(ap, fmt); p = kvasprintf(gfp, fmt, ap); va_end(ap); return (p); } static void linux_timer_callback_wrapper(void *context) { struct timer_list *timer; timer = context; /* the timer is about to be shutdown permanently */ if (timer->function == NULL) return; if (linux_set_current_flags(curthread, M_NOWAIT)) { /* try again later */ callout_reset(&timer->callout, 1, &linux_timer_callback_wrapper, timer); return; } timer->function(timer->data); } int mod_timer(struct timer_list *timer, int expires) { int ret; timer->expires = expires; ret = callout_reset(&timer->callout, linux_timer_jiffies_until(expires), &linux_timer_callback_wrapper, timer); MPASS(ret == 0 || ret == 1); return (ret == 1); } void add_timer(struct timer_list *timer) { callout_reset(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer); } void add_timer_on(struct timer_list *timer, int cpu) { callout_reset_on(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer, cpu); } int del_timer(struct timer_list *timer) { if (callout_stop(&(timer)->callout) == -1) return (0); return (1); } int del_timer_sync(struct timer_list *timer) { if (callout_drain(&(timer)->callout) == -1) return (0); return (1); } int timer_delete_sync(struct timer_list *timer) { return (del_timer_sync(timer)); } int timer_shutdown_sync(struct timer_list *timer) { timer->function = NULL; return (del_timer_sync(timer)); } /* greatest common divisor, Euclid equation */ static uint64_t lkpi_gcd_64(uint64_t a, uint64_t b) { uint64_t an; uint64_t bn; while (b != 0) { an = b; bn = a % b; a = an; b = bn; } return (a); } uint64_t lkpi_nsec2hz_rem; uint64_t lkpi_nsec2hz_div = 1000000000ULL; uint64_t lkpi_nsec2hz_max; uint64_t lkpi_usec2hz_rem; uint64_t lkpi_usec2hz_div = 1000000ULL; uint64_t lkpi_usec2hz_max; uint64_t lkpi_msec2hz_rem; uint64_t lkpi_msec2hz_div = 1000ULL; uint64_t lkpi_msec2hz_max; static void linux_timer_init(void *arg) { uint64_t gcd; /* * Compute an internal HZ value which can divide 2**32 to * avoid timer rounding problems when the tick value wraps * around 2**32: */ linux_timer_hz_mask = 1; while (linux_timer_hz_mask < (unsigned long)hz) linux_timer_hz_mask *= 2; linux_timer_hz_mask--; /* compute some internal constants */ lkpi_nsec2hz_rem = hz; lkpi_usec2hz_rem = hz; lkpi_msec2hz_rem = hz; gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); lkpi_nsec2hz_rem /= gcd; lkpi_nsec2hz_div /= gcd; lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); lkpi_usec2hz_rem /= gcd; lkpi_usec2hz_div /= gcd; lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); lkpi_msec2hz_rem /= gcd; lkpi_msec2hz_div /= gcd; lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; } SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); void linux_complete_common(struct completion *c, int all) { int wakeup_swapper; sleepq_lock(c); if (all) { c->done = UINT_MAX; wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); } else { if (c->done != UINT_MAX) c->done++; wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); } sleepq_release(c); if (wakeup_swapper) kick_proc0(); } /* * Indefinite wait for done != 0 with or without signals. 
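[Illustrative sketch, not part of the patch: how the completion helpers below are typically reached through the usual <linux/completion.h> wrappers, which are assumed here and not shown in this diff; names are hypothetical.]

	static struct completion example_done;

	static void
	example_start(void)
	{
		init_completion(&example_done);
		/* ... kick off asynchronous work that finishes with complete(&example_done) ... */
	}

	static int
	example_wait(void)
	{
		/* Sleeps on the sleepqueue keyed by &example_done until done != 0;
		   returns 0 on success or -ERESTARTSYS if interrupted by a signal. */
		return (wait_for_completion_interruptible(&example_done));
	}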
*/ int linux_wait_for_common(struct completion *c, int flags) { struct task_struct *task; int error; if (SCHEDULER_STOPPED()) return (0); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; error = 0; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); if (flags & SLEEPQ_INTERRUPTIBLE) { DROP_GIANT(); error = -sleepq_wait_sig(c, 0); PICKUP_GIANT(); if (error != 0) { linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; goto intr; } } else { DROP_GIANT(); sleepq_wait(c, 0); PICKUP_GIANT(); } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); intr: return (error); } /* * Time limited wait for done != 0 with or without signals. */ int linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) { struct task_struct *task; int end = jiffies + timeout; int error; if (SCHEDULER_STOPPED()) return (0); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); sleepq_set_timeout(c, linux_timer_jiffies_until(end)); DROP_GIANT(); if (flags & SLEEPQ_INTERRUPTIBLE) error = -sleepq_timedwait_sig(c, 0); else error = -sleepq_timedwait(c, 0); PICKUP_GIANT(); if (error != 0) { /* check for timeout */ if (error == -EWOULDBLOCK) { error = 0; /* timeout */ } else { /* signal happened */ linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; } goto done; } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); /* return how many jiffies are left */ error = linux_timer_jiffies_until(end); done: return (error); } int linux_try_wait_for_completion(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); if (c->done != 0 && c->done != UINT_MAX) c->done--; sleepq_release(c); return (isdone); } int linux_completion_done(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); sleepq_release(c); return (isdone); } static void linux_cdev_deref(struct linux_cdev *ldev) { if (refcount_release(&ldev->refs) && ldev->kobj.ktype == &linux_cdev_ktype) kfree(ldev); } static void linux_cdev_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; linux_destroy_dev(cdev); linux_cdev_deref(cdev); kobject_put(parent); } static void linux_cdev_static_release(struct kobject *kobj) { struct cdev *cdev; struct linux_cdev *ldev; ldev = container_of(kobj, struct linux_cdev, kobj); cdev = ldev->cdev; if (cdev != NULL) { destroy_dev(cdev); ldev->cdev = NULL; } kobject_put(kobj->parent); } int linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) { int ret; if (dev->devt != 0) { /* Set parent kernel object. 
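 * The parent pointer set below is dropped again via kobject_put() in
 * linux_cdev_release() / linux_cdev_static_release() when the
 * character device is torn down.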
*/ ldev->kobj.parent = &dev->kobj; /* * Unlike Linux we require the kobject of the * character device structure to have a valid name * before calling this function: */ if (ldev->kobj.name == NULL) return (-EINVAL); ret = cdev_add(ldev, dev->devt, 1); if (ret) return (ret); } ret = device_add(dev); if (ret != 0 && dev->devt != 0) cdev_del(ldev); return (ret); } void linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) { device_del(dev); if (dev->devt != 0) cdev_del(ldev); } static void linux_destroy_dev(struct linux_cdev *ldev) { if (ldev->cdev == NULL) return; MPASS((ldev->siref & LDEV_SI_DTR) == 0); MPASS(ldev->kobj.ktype == &linux_cdev_ktype); atomic_set_int(&ldev->siref, LDEV_SI_DTR); while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) pause("ldevdtr", hz / 4); destroy_dev(ldev->cdev); ldev->cdev = NULL; } const struct kobj_type linux_cdev_ktype = { .release = linux_cdev_release, }; const struct kobj_type linux_cdev_static_ktype = { .release = linux_cdev_static_release, }; static void linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; if (linkstate == LINK_STATE_UP) nb->notifier_call(nb, NETDEV_UP, &ni); else nb->notifier_call(nb, NETDEV_DOWN, &ni); } static void linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_REGISTER, &ni); } static void linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); } static void linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); } static void linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); } int register_netdevice_notifier(struct notifier_block *nb) { nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( iflladdr_event, linux_handle_iflladdr_event, nb, 0); return (0); } int register_inetaddr_notifier(struct notifier_block *nb) { nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( ifaddr_event, linux_handle_ifaddr_event, nb, 0); return (0); } int unregister_netdevice_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifnet_link_event, nb->tags[NETDEV_UP]); EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nb->tags[NETDEV_REGISTER]); EVENTHANDLER_DEREGISTER(ifnet_departure_event, nb->tags[NETDEV_UNREGISTER]); EVENTHANDLER_DEREGISTER(iflladdr_event, nb->tags[NETDEV_CHANGEADDR]); return (0); } int unregister_inetaddr_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifaddr_event, nb->tags[NETDEV_CHANGEIFADDR]); return (0); } struct list_sort_thunk { int (*cmp)(void *, 
struct list_head *, struct list_head *); void *priv; }; static inline int linux_le_cmp(const void *d1, const void *d2, void *priv) { struct list_head *le1, *le2; struct list_sort_thunk *thunk; thunk = priv; le1 = *(__DECONST(struct list_head **, d1)); le2 = *(__DECONST(struct list_head **, d2)); return ((thunk->cmp)(thunk->priv, le1, le2)); } void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)) { struct list_sort_thunk thunk; struct list_head **ar, *le; size_t count, i; count = 0; list_for_each(le, head) count++; ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); i = 0; list_for_each(le, head) ar[i++] = le; thunk.cmp = cmp; thunk.priv = priv; qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk); INIT_LIST_HEAD(head); for (i = 0; i < count; i++) list_add_tail(ar[i], head); free(ar, M_KMALLOC); } #if defined(__i386__) || defined(__amd64__) int linux_wbinvd_on_all_cpus(void) { pmap_invalidate_cache(); return (0); } #endif int linux_on_each_cpu(void callback(void *), void *data) { smp_rendezvous(smp_no_rendezvous_barrier, callback, smp_no_rendezvous_barrier, data); return (0); } int linux_in_atomic(void) { return ((curthread->td_pflags & TDP_NOFAULTING) != 0); } struct linux_cdev * linux_find_cdev(const char *name, unsigned major, unsigned minor) { dev_t dev = MKDEV(major, minor); struct cdev *cdev; dev_lock(); LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { struct linux_cdev *ldev = cdev->si_drv1; if (ldev->dev == dev && strcmp(kobject_name(&ldev->kobj), name) == 0) { break; } } dev_unlock(); return (cdev != NULL ? cdev->si_drv1 : NULL); } int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add(cdev, makedev(major, i), 1); if (ret != 0) break; } return (ret); } int __register_chrdev_p(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops, uid_t uid, gid_t gid, int mode) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); if (ret != 0) break; } return (ret); } void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name) { struct linux_cdev *cdevp; int i; for (i = baseminor; i < baseminor + count; i++) { cdevp = linux_find_cdev(name, major, i); if (cdevp != NULL) cdev_del(cdevp); } } void linux_dump_stack(void) { #ifdef STACK struct stack st; stack_save(&st); stack_print(&st); #endif } int linuxkpi_net_ratelimit(void) { return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, lkpi_net_maxpps)); } struct io_mapping * io_mapping_create_wc(resource_size_t base, unsigned long size) { struct io_mapping *mapping; mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); if (mapping == NULL) return (NULL); return (io_mapping_init_wc(mapping, base, size)); } #if defined(__i386__) || defined(__amd64__) bool linux_cpu_has_clflush; struct cpuinfo_x86 boot_cpu_data; struct cpuinfo_x86 *__cpu_data; #endif cpumask_t * lkpi_get_static_single_cpu_mask(int cpuid) { KASSERT((cpuid >= 0 && cpuid <= mp_maxid), ("%s: invalid cpuid %d\n", __func__, cpuid)); 
KASSERT(!CPU_ABSENT(cpuid), ("%s: cpu with cpuid %d is absent\n", __func__, cpuid)); return (static_single_cpu_mask[cpuid]); } bool lkpi_xen_initial_domain(void) { #ifdef XENHVM return (xen_initial_domain()); #else return (false); #endif } bool lkpi_xen_pv_domain(void) { #ifdef XENHVM return (xen_pv_domain()); #else return (false); #endif } static void linux_compat_init(void *arg) { struct sysctl_oid *rootoid; int i; #if defined(__i386__) || defined(__amd64__) static const uint32_t x86_vendors[X86_VENDOR_NUM] = { [X86_VENDOR_INTEL] = CPU_VENDOR_INTEL, [X86_VENDOR_CYRIX] = CPU_VENDOR_CYRIX, [X86_VENDOR_AMD] = CPU_VENDOR_AMD, [X86_VENDOR_UMC] = CPU_VENDOR_UMC, [X86_VENDOR_CENTAUR] = CPU_VENDOR_CENTAUR, [X86_VENDOR_TRANSMETA] = CPU_VENDOR_TRANSMETA, [X86_VENDOR_NSC] = CPU_VENDOR_NSC, [X86_VENDOR_HYGON] = CPU_VENDOR_HYGON, }; uint8_t x86_vendor = X86_VENDOR_UNKNOWN; for (i = 0; i < X86_VENDOR_NUM; i++) { if (cpu_vendor_id != 0 && cpu_vendor_id == x86_vendors[i]) { x86_vendor = i; break; } } linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); boot_cpu_data.x86_clflush_size = cpu_clflush_line_size; boot_cpu_data.x86_max_cores = mp_ncpus; boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id); boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id); boot_cpu_data.x86_vendor = x86_vendor; __cpu_data = mallocarray(mp_maxid + 1, sizeof(*__cpu_data), M_KMALLOC, M_WAITOK | M_ZERO); CPU_FOREACH(i) { __cpu_data[i].x86_clflush_size = cpu_clflush_line_size; __cpu_data[i].x86_max_cores = mp_ncpus; __cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id); __cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id); __cpu_data[i].x86_vendor = x86_vendor; } #endif rw_init(&linux_vma_lock, "lkpi-vma-lock"); rootoid = SYSCTL_ADD_ROOT_NODE(NULL, OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); kobject_init(&linux_class_root, &linux_class_ktype); kobject_set_name(&linux_class_root, "class"); linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); kobject_init(&linux_root_device.kobj, &linux_dev_ktype); kobject_set_name(&linux_root_device.kobj, "device"); linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); linux_root_device.bsddev = root_bus; linux_class_misc.name = "misc"; class_register(&linux_class_misc); INIT_LIST_HEAD(&pci_drivers); INIT_LIST_HEAD(&pci_devices); spin_lock_init(&pci_lock); mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); for (i = 0; i < VMMAP_HASH_SIZE; i++) LIST_INIT(&vmmaphead[i]); init_waitqueue_head(&linux_bit_waitq); init_waitqueue_head(&linux_var_waitq); CPU_COPY(&all_cpus, &cpu_online_mask); /* * Generate a single-CPU cpumask_t for each CPU (possibly) in the system. * CPUs are indexed from 0..(mp_maxid). The entry for cpuid 0 will only * have itself in the cpumask, cupid 1 only itself on entry 1, and so on. * This is used by cpumask_of() (and possibly others in the future) for, * e.g., drivers to pass hints to irq_set_affinity_hint(). */ static_single_cpu_mask = mallocarray(mp_maxid + 1, sizeof(static_single_cpu_mask), M_KMALLOC, M_WAITOK | M_ZERO); /* * When the number of CPUs reach a threshold, we start to save memory * given the sets are static by overlapping those having their single * bit set at same position in a bitset word. Asymptotically, this * regular scheme is in O(n²) whereas the overlapping one is in O(n) * only with n being the maximum number of CPUs, so the gain will become * huge quite quickly. 
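	 * To put rough numbers on this (illustrative only, assuming
	 * _BITSET_BITS == 64 and CPU_SETSIZE == 256, i.e. 4 bitset words
	 * per cpuset_t): the regular scheme needs n * 4 * 8 = 32 * n bytes,
	 * while the overlapping spans cost a fixed
	 * 64 * (2 * 4 - 1) * 8 = 3584 bytes, so the two schemes cross over
	 * at roughly 112 CPUs.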
The threshold for 64-bit architectures is 128 * CPUs. */ if (mp_ncpus < (2 * _BITSET_BITS)) { cpumask_t *sscm_ptr; /* * This represents 'mp_ncpus * __bitset_words(CPU_SETSIZE) * * (_BITSET_BITS / 8)' bytes (for comparison with the * overlapping scheme). */ static_single_cpu_mask_lcs = mallocarray(mp_ncpus, sizeof(*static_single_cpu_mask_lcs), M_KMALLOC, M_WAITOK | M_ZERO); sscm_ptr = static_single_cpu_mask_lcs; CPU_FOREACH(i) { static_single_cpu_mask[i] = sscm_ptr++; CPU_SET(i, static_single_cpu_mask[i]); } } else { /* Pointer to a bitset word. */ __typeof(((cpuset_t *)NULL)->__bits[0]) *bwp; /* * Allocate memory for (static) spans of 'cpumask_t' ('cpuset_t' * really) with a single bit set that can be reused for all * single CPU masks by making them start at different offsets. * We need '__bitset_words(CPU_SETSIZE) - 1' bitset words before * the word having its single bit set, and the same amount * after. */ static_single_cpu_mask_lcs = mallocarray(_BITSET_BITS, (2 * __bitset_words(CPU_SETSIZE) - 1) * (_BITSET_BITS / 8), M_KMALLOC, M_WAITOK | M_ZERO); /* * We rely below on cpuset_t and the bitset generic * implementation assigning words in the '__bits' array in the * same order of bits (i.e., little-endian ordering, not to be * confused with machine endianness, which concerns bits in * words and other integers). This is an imperfect test, but it * will detect a change to big-endian ordering. */ _Static_assert( __bitset_word(_BITSET_BITS + 1, _BITSET_BITS) == 1, "Assumes a bitset implementation that is little-endian " "on its words"); /* Initialize the single bit of each static span. */ bwp = (__typeof(bwp))static_single_cpu_mask_lcs + (__bitset_words(CPU_SETSIZE) - 1); for (i = 0; i < _BITSET_BITS; i++) { CPU_SET(i, (cpuset_t *)bwp); bwp += (2 * __bitset_words(CPU_SETSIZE) - 1); } /* * Finally set all CPU masks to the proper word in their * relevant span. */ CPU_FOREACH(i) { bwp = (__typeof(bwp))static_single_cpu_mask_lcs; /* Find the non-zero word of the relevant span. */ bwp += (2 * __bitset_words(CPU_SETSIZE) - 1) * (i % _BITSET_BITS) + __bitset_words(CPU_SETSIZE) - 1; /* Shift to find the CPU mask start. */ bwp -= (i / _BITSET_BITS); static_single_cpu_mask[i] = (cpuset_t *)bwp; } } strlcpy(init_uts_ns.name.release, osrelease, sizeof(init_uts_ns.name.release)); } SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); static void linux_compat_uninit(void *arg) { linux_kobject_kfree_name(&linux_class_root); linux_kobject_kfree_name(&linux_root_device.kobj); linux_kobject_kfree_name(&linux_class_misc.kobj); free(static_single_cpu_mask_lcs, M_KMALLOC); free(static_single_cpu_mask, M_KMALLOC); #if defined(__i386__) || defined(__amd64__) free(__cpu_data, M_KMALLOC); #endif mtx_destroy(&vmmaplock); spin_lock_destroy(&pci_lock); rw_destroy(&linux_vma_lock); } SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); /* * NOTE: Linux frequently uses "unsigned long" for pointer to integer * conversion and vice versa, where in FreeBSD "uintptr_t" would be * used. Assert these types have the same size, else some parts of the * LinuxKPI may not work like expected: */ CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); diff --git a/sys/compat/linuxkpi/common/src/linux_idr.c b/sys/compat/linuxkpi/common/src/linux_idr.c index dc64da0d7cf5..583e2c237198 100644 --- a/sys/compat/linuxkpi/common/src/linux_idr.c +++ b/sys/compat/linuxkpi/common/src/linux_idr.c @@ -1,805 +1,805 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. 
* Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) struct linux_idr_cache { spinlock_t lock; struct idr_layer *head; unsigned count; }; DPCPU_DEFINE_STATIC(struct linux_idr_cache, linux_idr_cache); /* * IDR Implementation. * * This is quick and dirty and not as re-entrant as the linux version * however it should be fairly fast. It is basically a radix tree with * a builtin bitmap for allocation. 
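 *
 * A minimal consumer sketch (hedged: only functions implemented in
 * this file are used; error handling is omitted and "ptr" stands for
 * an arbitrary driver object). Passing end == 0 to idr_alloc() means
 * "no upper bound":
 *
 *	struct idr map;
 *	int id;
 *
 *	idr_init(&map);
 *	id = idr_alloc(&map, ptr, 1, 0, GFP_KERNEL);
 *	...
 *	ptr = idr_find(&map, id);
 *	idr_remove(&map, id);
 *	idr_destroy(&map);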
*/ static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat"); static struct idr_layer * idr_preload_dequeue_locked(struct linux_idr_cache *lic) { struct idr_layer *retval; /* check if wrong thread is trying to dequeue */ - if (mtx_owned(&lic->lock.m) == 0) + if (mtx_owned(&lic->lock) == 0) return (NULL); retval = lic->head; if (likely(retval != NULL)) { lic->head = retval->ary[0]; lic->count--; retval->ary[0] = NULL; } return (retval); } static void idr_preload_init(void *arg) { int cpu; CPU_FOREACH(cpu) { struct linux_idr_cache *lic = DPCPU_ID_PTR(cpu, linux_idr_cache); spin_lock_init(&lic->lock); } } SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL); static void idr_preload_uninit(void *arg) { int cpu; CPU_FOREACH(cpu) { struct idr_layer *cacheval; struct linux_idr_cache *lic = DPCPU_ID_PTR(cpu, linux_idr_cache); while (1) { spin_lock(&lic->lock); cacheval = idr_preload_dequeue_locked(lic); spin_unlock(&lic->lock); if (cacheval == NULL) break; free(cacheval, M_IDR); } spin_lock_destroy(&lic->lock); } } SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL); void idr_preload(gfp_t gfp_mask) { struct linux_idr_cache *lic; struct idr_layer *cacheval; sched_pin(); lic = &DPCPU_GET(linux_idr_cache); /* fill up cache */ spin_lock(&lic->lock); while (lic->count < MAX_IDR_FREE) { spin_unlock(&lic->lock); cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask); spin_lock(&lic->lock); if (cacheval == NULL) break; cacheval->ary[0] = lic->head; lic->head = cacheval; lic->count++; } } void idr_preload_end(void) { struct linux_idr_cache *lic; lic = &DPCPU_GET(linux_idr_cache); spin_unlock(&lic->lock); sched_unpin(); } static inline int idr_max(struct idr *idr) { return (1 << (idr->layers * IDR_BITS)) - 1; } static inline int idr_pos(int id, int layer) { return (id >> (IDR_BITS * layer)) & IDR_MASK; } void idr_init(struct idr *idr) { bzero(idr, sizeof(*idr)); mtx_init(&idr->lock, "idr", NULL, MTX_DEF); } /* Only frees cached pages. */ void idr_destroy(struct idr *idr) { struct idr_layer *il, *iln; idr_remove_all(idr); mtx_lock(&idr->lock); for (il = idr->free; il != NULL; il = iln) { iln = il->ary[0]; free(il, M_IDR); } mtx_unlock(&idr->lock); mtx_destroy(&idr->lock); } static void idr_remove_layer(struct idr_layer *il, int layer) { int i; if (il == NULL) return; if (layer == 0) { free(il, M_IDR); return; } for (i = 0; i < IDR_SIZE; i++) if (il->ary[i]) idr_remove_layer(il->ary[i], layer - 1); } void idr_remove_all(struct idr *idr) { mtx_lock(&idr->lock); idr_remove_layer(idr->top, idr->layers - 1); idr->top = NULL; idr->layers = 0; mtx_unlock(&idr->lock); } static void * idr_remove_locked(struct idr *idr, int id) { struct idr_layer *il; void *res; int layer; int idx; id &= MAX_ID_MASK; il = idr->top; layer = idr->layers - 1; if (il == NULL || id > idr_max(idr)) return (NULL); /* * Walk down the tree to this item setting bitmaps along the way * as we know at least one item will be free along this path. */ while (layer && il) { idx = idr_pos(id, layer); il->bitmap |= 1 << idx; il = il->ary[idx]; layer--; } idx = id & IDR_MASK; /* * At this point we've set free space bitmaps up the whole tree. * We could make this non-fatal and unwind but linux dumps a stack * and a warning so I don't think it's necessary. 
*/ if (il == NULL || (il->bitmap & (1 << idx)) != 0) panic("idr_remove: Item %d not allocated (%p, %p)\n", id, idr, il); res = il->ary[idx]; il->ary[idx] = NULL; il->bitmap |= 1 << idx; return (res); } void * idr_remove(struct idr *idr, int id) { void *res; mtx_lock(&idr->lock); res = idr_remove_locked(idr, id); mtx_unlock(&idr->lock); return (res); } static inline struct idr_layer * idr_find_layer_locked(struct idr *idr, int id) { struct idr_layer *il; int layer; id &= MAX_ID_MASK; il = idr->top; layer = idr->layers - 1; if (il == NULL || id > idr_max(idr)) return (NULL); while (layer && il) { il = il->ary[idr_pos(id, layer)]; layer--; } return (il); } void * idr_replace(struct idr *idr, void *ptr, int id) { struct idr_layer *il; void *res; int idx; mtx_lock(&idr->lock); il = idr_find_layer_locked(idr, id); idx = id & IDR_MASK; /* Replace still returns an error if the item was not allocated. */ if (il == NULL || (il->bitmap & (1 << idx))) { res = ERR_PTR(-ENOENT); } else { res = il->ary[idx]; il->ary[idx] = ptr; } mtx_unlock(&idr->lock); return (res); } static inline void * idr_find_locked(struct idr *idr, int id) { struct idr_layer *il; void *res; mtx_assert(&idr->lock, MA_OWNED); il = idr_find_layer_locked(idr, id); if (il != NULL) res = il->ary[id & IDR_MASK]; else res = NULL; return (res); } void * idr_find(struct idr *idr, int id) { void *res; mtx_lock(&idr->lock); res = idr_find_locked(idr, id); mtx_unlock(&idr->lock); return (res); } void * idr_get_next(struct idr *idr, int *nextidp) { void *res = NULL; int id = *nextidp; mtx_lock(&idr->lock); for (; id <= idr_max(idr); id++) { res = idr_find_locked(idr, id); if (res == NULL) continue; *nextidp = id; break; } mtx_unlock(&idr->lock); return (res); } int idr_pre_get(struct idr *idr, gfp_t gfp_mask) { struct idr_layer *il, *iln; struct idr_layer *head; int need; mtx_lock(&idr->lock); for (;;) { need = idr->layers + 1; for (il = idr->free; il != NULL; il = il->ary[0]) need--; mtx_unlock(&idr->lock); if (need <= 0) break; for (head = NULL; need; need--) { iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask); if (iln == NULL) break; bitmap_fill(&iln->bitmap, IDR_SIZE); if (head != NULL) { il->ary[0] = iln; il = iln; } else head = il = iln; } if (head == NULL) return (0); mtx_lock(&idr->lock); il->ary[0] = idr->free; idr->free = head; } return (1); } static struct idr_layer * idr_free_list_get(struct idr *idp) { struct idr_layer *il; if ((il = idp->free) != NULL) { idp->free = il->ary[0]; il->ary[0] = NULL; } return (il); } static inline struct idr_layer * idr_get(struct idr *idp) { struct idr_layer *il; if ((il = idr_free_list_get(idp)) != NULL) { MPASS(il->bitmap != 0); } else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) { bitmap_fill(&il->bitmap, IDR_SIZE); } else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) { bitmap_fill(&il->bitmap, IDR_SIZE); } else { return (NULL); } return (il); } /* * Could be implemented as get_new_above(idr, ptr, 0, idp) but written * first for simplicity sake. */ static int idr_get_new_locked(struct idr *idr, void *ptr, int *idp) { struct idr_layer *stack[MAX_LEVEL]; struct idr_layer *il; int error; int layer; int idx; int id; mtx_assert(&idr->lock, MA_OWNED); error = -EAGAIN; /* * Expand the tree until there is free space. 
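	 * Each level of the tree resolves IDR_BITS bits of the id, so a
	 * tree with "layers" levels covers ids
	 * 0 .. (1 << (layers * IDR_BITS)) - 1 (see idr_max()); adding one
	 * level grows that range by a factor of 1 << IDR_BITS.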
*/ if (idr->top == NULL || idr->top->bitmap == 0) { if (idr->layers == MAX_LEVEL + 1) { error = -ENOSPC; goto out; } il = idr_get(idr); if (il == NULL) goto out; il->ary[0] = idr->top; if (idr->top) il->bitmap &= ~1; idr->top = il; idr->layers++; } il = idr->top; id = 0; /* * Walk the tree following free bitmaps, record our path. */ for (layer = idr->layers - 1;; layer--) { stack[layer] = il; idx = ffsl(il->bitmap); if (idx == 0) panic("idr_get_new: Invalid leaf state (%p, %p)\n", idr, il); idx--; id |= idx << (layer * IDR_BITS); if (layer == 0) break; if (il->ary[idx] == NULL) { il->ary[idx] = idr_get(idr); if (il->ary[idx] == NULL) goto out; } il = il->ary[idx]; } /* * Allocate the leaf to the consumer. */ il->bitmap &= ~(1 << idx); il->ary[idx] = ptr; *idp = id; /* * Clear bitmaps potentially up to the root. */ while (il->bitmap == 0 && ++layer < idr->layers) { il = stack[layer]; il->bitmap &= ~(1 << idr_pos(id, layer)); } error = 0; out: #ifdef INVARIANTS if (error == 0 && idr_find_locked(idr, id) != ptr) { panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n", idr, id, ptr); } #endif return (error); } int idr_get_new(struct idr *idr, void *ptr, int *idp) { int retval; mtx_lock(&idr->lock); retval = idr_get_new_locked(idr, ptr, idp); mtx_unlock(&idr->lock); return (retval); } static int idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp) { struct idr_layer *stack[MAX_LEVEL]; struct idr_layer *il; int error; int layer; int idx, sidx; int id; mtx_assert(&idr->lock, MA_OWNED); error = -EAGAIN; /* * Compute the layers required to support starting_id and the mask * at the top layer. */ restart: idx = starting_id; layer = 0; while (idx & ~IDR_MASK) { layer++; idx >>= IDR_BITS; } if (layer == MAX_LEVEL + 1) { error = -ENOSPC; goto out; } /* * Expand the tree until there is free space at or beyond starting_id. */ while (idr->layers <= layer || idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) { if (idr->layers == MAX_LEVEL + 1) { error = -ENOSPC; goto out; } il = idr_get(idr); if (il == NULL) goto out; il->ary[0] = idr->top; if (idr->top && idr->top->bitmap == 0) il->bitmap &= ~1; idr->top = il; idr->layers++; } il = idr->top; id = 0; /* * Walk the tree following free bitmaps, record our path. */ for (layer = idr->layers - 1;; layer--) { stack[layer] = il; sidx = idr_pos(starting_id, layer); /* Returns index numbered from 0 or size if none exists. */ idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx); if (idx == IDR_SIZE && sidx == 0) panic("idr_get_new: Invalid leaf state (%p, %p)\n", idr, il); /* * We may have walked a path where there was a free bit but * it was lower than what we wanted. Restart the search with * a larger starting id. id contains the progress we made so * far. Search the leaf one above this level. This may * restart as many as MAX_LEVEL times but that is expected * to be rare. */ if (idx == IDR_SIZE) { starting_id = id + (1 << ((layer + 1) * IDR_BITS)); goto restart; } if (idx > sidx) starting_id = 0; /* Search the whole subtree. */ id |= idx << (layer * IDR_BITS); if (layer == 0) break; if (il->ary[idx] == NULL) { il->ary[idx] = idr_get(idr); if (il->ary[idx] == NULL) goto out; } il = il->ary[idx]; } /* * Allocate the leaf to the consumer. */ il->bitmap &= ~(1 << idx); il->ary[idx] = ptr; *idp = id; /* * Clear bitmaps potentially up to the root. 
*/ while (il->bitmap == 0 && ++layer < idr->layers) { il = stack[layer]; il->bitmap &= ~(1 << idr_pos(id, layer)); } error = 0; out: #ifdef INVARIANTS if (error == 0 && idr_find_locked(idr, id) != ptr) { panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n", idr, id, ptr); } #endif return (error); } int idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp) { int retval; mtx_lock(&idr->lock); retval = idr_get_new_above_locked(idr, ptr, starting_id, idp); mtx_unlock(&idr->lock); return (retval); } int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) { return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id)); } static int idr_alloc_locked(struct idr *idr, void *ptr, int start, int end) { int max = end > 0 ? end - 1 : INT_MAX; int error; int id; mtx_assert(&idr->lock, MA_OWNED); if (unlikely(start < 0)) return (-EINVAL); if (unlikely(max < start)) return (-ENOSPC); if (start == 0) error = idr_get_new_locked(idr, ptr, &id); else error = idr_get_new_above_locked(idr, ptr, start, &id); if (unlikely(error < 0)) return (error); if (unlikely(id > max)) { idr_remove_locked(idr, id); return (-ENOSPC); } return (id); } int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) { int retval; mtx_lock(&idr->lock); retval = idr_alloc_locked(idr, ptr, start, end); mtx_unlock(&idr->lock); return (retval); } int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) { int retval; mtx_lock(&idr->lock); retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end); if (unlikely(retval == -ENOSPC)) retval = idr_alloc_locked(idr, ptr, start, end); if (likely(retval >= 0)) idr->next_cyclic_id = retval + 1; mtx_unlock(&idr->lock); return (retval); } static int idr_for_each_layer(struct idr_layer *il, int offset, int layer, int (*f)(int id, void *p, void *data), void *data) { int i, err; if (il == NULL) return (0); if (layer == 0) { for (i = 0; i < IDR_SIZE; i++) { if (il->ary[i] == NULL) continue; err = f(i + offset, il->ary[i], data); if (err) return (err); } return (0); } for (i = 0; i < IDR_SIZE; i++) { if (il->ary[i] == NULL) continue; err = idr_for_each_layer(il->ary[i], (i + offset) * IDR_SIZE, layer - 1, f, data); if (err) return (err); } return (0); } /* NOTE: It is not allowed to modify the IDR tree while it is being iterated */ int idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data) { return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data)); } static int idr_has_entry(int id, void *p, void *data) { return (1); } bool idr_is_empty(struct idr *idp) { return (idr_for_each(idp, idr_has_entry, NULL) == 0); } int ida_pre_get(struct ida *ida, gfp_t flags) { if (idr_pre_get(&ida->idr, flags) == 0) return (0); if (ida->free_bitmap == NULL) { ida->free_bitmap = malloc(sizeof(struct ida_bitmap), M_IDR, flags); } return (ida->free_bitmap != NULL); } int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t flags) { int ret, id; unsigned int max; MPASS((int)start >= 0); if ((int)end <= 0) max = INT_MAX; else { MPASS(end > start); max = end - 1; } again: if (!ida_pre_get(ida, flags)) return (-ENOMEM); if ((ret = ida_get_new_above(ida, start, &id)) == 0) { if (id > max) { ida_remove(ida, id); ret = -ENOSPC; } else { ret = id; } } if (__predict_false(ret == -EAGAIN)) goto again; return (ret); } void ida_simple_remove(struct ida *ida, unsigned int id) { idr_remove(&ida->idr, id); } void ida_remove(struct ida *ida, int id) { idr_remove(&ida->idr, id); } void 
ida_init(struct ida *ida) { idr_init(&ida->idr); } void ida_destroy(struct ida *ida) { idr_destroy(&ida->idr); free(ida->free_bitmap, M_IDR); } diff --git a/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c b/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c index ee8ed0da240d..9a73c7571fd7 100644 --- a/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c +++ b/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c @@ -1,1002 +1,1002 @@ /* * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #define LINUXKPI_PARAM_PREFIX mlx4_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "en.h" int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, u32 size, u16 stride, int node, int queue_idx) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_tx_ring *ring; uint32_t x; int tmp; int err; ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node); if (!ring) { ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL); if (!ring) { en_err(priv, "Failed allocating TX ring\n"); return -ENOMEM; } } /* Create DMA descriptor TAG */ if ((err = -bus_dma_tag_create( bus_get_dma_tag(mdev->pdev->dev.bsddev), 1, /* any alignment */ 0, /* no boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MLX4_EN_TX_MAX_PAYLOAD_SIZE, /* maxsize */ MLX4_EN_TX_MAX_MBUF_FRAGS, /* nsegments */ MLX4_EN_TX_MAX_MBUF_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &ring->dma_tag))) goto done; ring->size = size; ring->size_mask = size - 1; ring->stride = stride; ring->inline_thold = MAX(MIN_PKT_LEN, MIN(priv->prof->inline_thold, MAX_INLINE)); - mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF); - mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF); + mtx_init(&ring->tx_lock, "mlx4 tx", NULL, MTX_DEF); + mtx_init(&ring->comp_lock, "mlx4 comp", NULL, MTX_DEF); tmp = size * sizeof(struct mlx4_en_tx_info); ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node); if (!ring->tx_info) { ring->tx_info = kzalloc(tmp, GFP_KERNEL); if (!ring->tx_info) { err = -ENOMEM; goto err_ring; } } /* Create DMA descriptor MAPs */ for (x = 0; x != size; x++) { err = -bus_dmamap_create(ring->dma_tag, 0, 
&ring->tx_info[x].dma_map); if (err != 0) { while (x--) { bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map); } goto err_info; } } en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", ring->tx_info, tmp); ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); /* Allocate HW buffers on provided NUMA node */ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 2 * PAGE_SIZE); if (err) { en_err(priv, "Failed allocating hwq resources\n"); goto err_dma_map; } err = mlx4_en_map_buffer(&ring->wqres.buf); if (err) { en_err(priv, "Failed to map TX buffer\n"); goto err_hwq_res; } ring->buf = ring->wqres.buf.direct.buf; en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, MLX4_RESERVE_ETH_BF_QP); if (err) { en_err(priv, "failed reserving qp for TX ring\n"); goto err_map; } err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); if (err) { en_err(priv, "Failed allocating qp %d\n", ring->qpn); goto err_reserve; } ring->qp.event = mlx4_en_sqp_event; err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); if (err) { en_dbg(DRV, priv, "working without blueflame (%d)", err); ring->bf.uar = &mdev->priv_uar; ring->bf.uar->map = mdev->uar_map; ring->bf_enabled = false; } else ring->bf_enabled = true; ring->queue_index = queue_idx; *pring = ring; return 0; err_reserve: mlx4_qp_release_range(mdev->dev, ring->qpn, 1); err_map: mlx4_en_unmap_buffer(&ring->wqres.buf); err_hwq_res: mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); err_dma_map: for (x = 0; x != size; x++) bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map); err_info: vfree(ring->tx_info); err_ring: bus_dma_tag_destroy(ring->dma_tag); done: kfree(ring); return err; } void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_tx_ring *ring = *pring; uint32_t x; en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); if (ring->bf_enabled) mlx4_bf_free(mdev->dev, &ring->bf); mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_qp_free(mdev->dev, &ring->qp); mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); for (x = 0; x != ring->size; x++) bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map); vfree(ring->tx_info); - mtx_destroy(&ring->tx_lock.m); - mtx_destroy(&ring->comp_lock.m); + mtx_destroy(&ring->tx_lock); + mtx_destroy(&ring->comp_lock); bus_dma_tag_destroy(ring->dma_tag); kfree(ring); *pring = NULL; } int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int cq, int user_prio) { struct mlx4_en_dev *mdev = priv->mdev; int err; ring->cqn = cq; ring->prod = 0; ring->cons = 0xffffffff; ring->last_nr_txbb = 1; ring->poll_cnt = 0; memset(ring->buf, 0, ring->buf_size); ring->watchdog_time = 0; ring->qp_state = MLX4_QP_STATE_RST; ring->doorbell_qpn = ring->qp.qpn << 8; mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ring->cqn, user_prio, &ring->context); if (ring->bf_enabled) ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, &ring->qp, &ring->qp_state); return err; } void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring) { struct mlx4_en_dev *mdev = 
priv->mdev; mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); } static volatile struct mlx4_wqe_data_seg * mlx4_en_store_inline_lso_data(volatile struct mlx4_wqe_data_seg *dseg, struct mbuf *mb, int len, __be32 owner_bit) { uint8_t *inl = __DEVOLATILE(uint8_t *, dseg); /* copy data into place */ m_copydata(mb, 0, len, inl + 4); dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT); return (dseg); } static void mlx4_en_store_inline_lso_header(volatile struct mlx4_wqe_data_seg *dseg, int len, __be32 owner_bit) { } static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, u32 index, u8 owner) { struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *) (ring->buf + (index * TXBB_SIZE)); volatile __be32 *ptr = (__be32 *)tx_desc; const __be32 stamp = cpu_to_be32(STAMP_VAL | ((u32)owner << STAMP_SHIFT)); u32 i; /* Stamp the freed descriptor */ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { *ptr = stamp; ptr += STAMP_DWORDS; } } static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, u32 index) { struct mlx4_en_tx_info *tx_info; struct mbuf *mb; tx_info = &ring->tx_info[index]; mb = tx_info->mb; if (mb == NULL) goto done; bus_dmamap_sync(ring->dma_tag, tx_info->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->dma_tag, tx_info->dma_map); m_freem(mb); done: return (tx_info->nr_txbb); } int mlx4_en_free_tx_buf(if_t dev, struct mlx4_en_tx_ring *ring) { struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); int cnt = 0; /* Skip last polled descriptor */ ring->cons += ring->last_nr_txbb; en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", ring->cons, ring->prod); if ((u32) (ring->prod - ring->cons) > ring->size) { en_warn(priv, "Tx consumer passed producer!\n"); return 0; } while (ring->cons != ring->prod) { ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, ring->cons & ring->size_mask); ring->cons += ring->last_nr_txbb; cnt++; } if (cnt) en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); return cnt; } static bool mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring) { int wqs; wqs = ring->size - (ring->prod - ring->cons); return (wqs < (HEADROOM + (2 * MLX4_EN_TX_WQE_MAX_WQEBBS))); } static int mlx4_en_process_tx_cq(if_t dev, struct mlx4_en_cq *cq) { struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; struct mlx4_cqe *cqe; u16 index; u16 new_index, ring_index, stamp_index; u32 txbbs_skipped = 0; u32 txbbs_stamp = 0; u32 cons_index = mcq->cons_index; int size = cq->size; u32 size_mask = ring->size_mask; struct mlx4_cqe *buf = cq->buf; int factor = priv->cqe_factor; if (!priv->port_up) return 0; index = cons_index & size_mask; cqe = &buf[(index << factor) + factor]; ring_index = ring->cons & size_mask; stamp_index = ring_index; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, cons_index & size)) { /* * make sure we read the CQE after we read the * ownership bit */ rmb(); if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR)) { en_err(priv, "CQE completed in error - vendor syndrom: 0x%x syndrom: 0x%x\n", ((struct mlx4_err_cqe *)cqe)-> vendor_err_syndrome, ((struct mlx4_err_cqe *)cqe)->syndrome); } /* Skip over last polled CQE */ new_index = be16_to_cpu(cqe->wqe_index) & size_mask; do { txbbs_skipped += ring->last_nr_txbb; ring_index = (ring_index 
+ ring->last_nr_txbb) & size_mask; /* free next descriptor */ ring->last_nr_txbb = mlx4_en_free_tx_desc( priv, ring, ring_index); mlx4_en_stamp_wqe(priv, ring, stamp_index, !!((ring->cons + txbbs_stamp) & ring->size)); stamp_index = ring_index; txbbs_stamp = txbbs_skipped; } while (ring_index != new_index); ++cons_index; index = cons_index & size_mask; cqe = &buf[(index << factor) + factor]; } /* * To prevent CQ overflow we first update CQ consumer and only then * the ring consumer. */ mcq->cons_index = cons_index; mlx4_cq_set_ci(mcq); wmb(); ring->cons += txbbs_skipped; return (0); } void mlx4_en_tx_irq(struct mlx4_cq *mcq) { struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = mlx4_netdev_priv(cq->dev); struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock)) return; mlx4_en_process_tx_cq(cq->dev, cq); mod_timer(&cq->timer, jiffies + 1); spin_unlock(&ring->comp_lock); } void mlx4_en_poll_tx_cq(unsigned long data) { struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data; struct mlx4_en_priv *priv = mlx4_netdev_priv(cq->dev); struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; u32 inflight; INC_PERF_COUNTER(priv->pstats.tx_poll); if (priv->port_up == 0) return; if (!spin_trylock(&ring->comp_lock)) { mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); return; } mlx4_en_process_tx_cq(cq->dev, cq); inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); /* If there are still packets in flight and the timer has not already * been scheduled by the Tx routine then schedule it here to guarantee * completion processing of these packets */ if (inflight && priv->port_up) mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); spin_unlock(&ring->comp_lock); } static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) { struct mlx4_en_cq *cq = priv->tx_cq[tx_ind]; struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; if (priv->port_up == 0) return; /* If we don't have a pending timer, set one up to catch our recent post in case the interface becomes idle */ if (!timer_pending(&cq->timer)) mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) if (spin_trylock(&ring->comp_lock)) { mlx4_en_process_tx_cq(priv->dev, cq); spin_unlock(&ring->comp_lock); } } static u16 mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb) { u16 retval; /* only copy from first fragment, if possible */ retval = MIN(ring->inline_thold, mb->m_len); /* check for too little data */ if (unlikely(retval < MIN_PKT_LEN)) retval = MIN(ring->inline_thold, mb->m_pkthdr.len); return (retval); } static int mlx4_en_get_header_size(struct mbuf *mb) { struct ether_vlan_header *eh; struct tcphdr *th; struct ip *ip; int ip_hlen, tcp_hlen; struct ip6_hdr *ip6; uint16_t eth_type; int eth_hdr_len; eh = mtod(mb, struct ether_vlan_header *); if (mb->m_len < ETHER_HDR_LEN) return (0); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { eth_type = ntohs(eh->evl_proto); eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { eth_type = ntohs(eh->evl_encap_proto); eth_hdr_len = ETHER_HDR_LEN; } if (mb->m_len < eth_hdr_len) return (0); switch (eth_type) { case ETHERTYPE_IP: ip = (struct ip *)(mb->m_data + eth_hdr_len); if (mb->m_len < eth_hdr_len + sizeof(*ip)) return (0); if (ip->ip_p != IPPROTO_TCP) return (0); ip_hlen = ip->ip_hl << 2; eth_hdr_len += ip_hlen; break; case 
ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len); if (mb->m_len < eth_hdr_len + sizeof(*ip6)) return (0); if (ip6->ip6_nxt != IPPROTO_TCP) return (0); eth_hdr_len += sizeof(*ip6); break; default: return (0); } if (mb->m_len < eth_hdr_len + sizeof(*th)) return (0); th = (struct tcphdr *)(mb->m_data + eth_hdr_len); tcp_hlen = th->th_off << 2; eth_hdr_len += tcp_hlen; if (mb->m_len < eth_hdr_len) return (0); return (eth_hdr_len); } static volatile struct mlx4_wqe_data_seg * mlx4_en_store_inline_data(volatile struct mlx4_wqe_data_seg *dseg, struct mbuf *mb, int len, __be32 owner_bit) { uint8_t *inl = __DEVOLATILE(uint8_t *, dseg); const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4; if (unlikely(len < MIN_PKT_LEN)) { m_copydata(mb, 0, len, inl + 4); memset(inl + 4 + len, 0, MIN_PKT_LEN - len); dseg += DIV_ROUND_UP(4 + MIN_PKT_LEN, DS_SIZE_ALIGNMENT); } else if (len <= spc) { m_copydata(mb, 0, len, inl + 4); dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT); } else { m_copydata(mb, 0, spc, inl + 4); m_copydata(mb, spc, len - spc, inl + 8 + spc); dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT); } return (dseg); } static void mlx4_en_store_inline_header(volatile struct mlx4_wqe_data_seg *dseg, int len, __be32 owner_bit) { uint8_t *inl = __DEVOLATILE(uint8_t *, dseg); const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4; if (unlikely(len < MIN_PKT_LEN)) { *(volatile uint32_t *)inl = SET_BYTE_COUNT((1U << 31) | MIN_PKT_LEN); } else if (len <= spc) { *(volatile uint32_t *)inl = SET_BYTE_COUNT((1U << 31) | len); } else { *(volatile uint32_t *)(inl + 4 + spc) = SET_BYTE_COUNT((1U << 31) | (len - spc)); wmb(); *(volatile uint32_t *)inl = SET_BYTE_COUNT((1U << 31) | spc); } } static uint32_t hashrandom; static void hashrandom_init(void *arg) { /* * It is assumed that the random subsystem has been * initialized when this function is called: */ hashrandom = m_ether_tcpip_hash_init(); } SYSINIT(hashrandom_init, SI_SUB_RANDOM, SI_ORDER_ANY, &hashrandom_init, NULL); u16 mlx4_en_select_queue(if_t dev, struct mbuf *mb) { struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); u32 rings_p_up = priv->num_tx_rings_p_up; u32 up = 0; u32 queue_index; #if (MLX4_EN_NUM_UP > 1) /* Obtain VLAN information if present */ if (mb->m_flags & M_VLANTAG) { u32 vlan_tag = mb->m_pkthdr.ether_vtag; up = (vlan_tag >> 13) % MLX4_EN_NUM_UP; } #endif queue_index = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4, mb, hashrandom); return ((queue_index % rings_p_up) + (up * rings_p_up)); } static void mlx4_bf_copy(void __iomem *dst, volatile unsigned long *src, unsigned bytecnt) { __iowrite64_copy(dst, __DEVOLATILE(void *, src), bytecnt / 8); } int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp) { enum { DS_FACT = TXBB_SIZE / DS_SIZE_ALIGNMENT, CTRL_FLAGS = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | MLX4_WQE_CTRL_SOLICITED), }; bus_dma_segment_t segs[MLX4_EN_TX_MAX_MBUF_FRAGS]; volatile struct mlx4_wqe_data_seg *dseg; volatile struct mlx4_wqe_data_seg *dseg_inline; volatile struct mlx4_en_tx_desc *tx_desc; struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; if_t ifp = priv->dev; struct mlx4_en_tx_info *tx_info; struct mbuf *mb = *mbp; struct mbuf *m; __be32 owner_bit; int nr_segs; int pad; int err; u32 bf_size; u32 bf_prod; u32 opcode; u16 index; u16 ds_cnt; u16 ihs; if (unlikely(!priv->port_up)) { err = EINVAL; goto tx_drop; } /* check if TX ring is full */ if (unlikely(mlx4_en_tx_ring_is_full(ring))) { /* Use interrupts to find out when queue opened */ mlx4_en_arm_cq(priv, priv->tx_cq[tx_ind]); 
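		/*
		 * The caller (mlx4_en_transmit_locked()) frees the mbuf and
		 * mlx4_en_transmit() accounts the drop; once the armed CQ
		 * fires and completions are reaped, later transmits will
		 * find room in the ring again.
		 */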
return (ENOBUFS); } /* sanity check we are not wrapping around */ KASSERT(((~ring->prod) & ring->size_mask) >= (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring")); /* Track current inflight packets for performance analysis */ AVG_PERF_COUNTER(priv->pstats.inflight_avg, (u32) (ring->prod - ring->cons - 1)); /* Track current mbuf packet header length */ AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len); /* Grab an index and try to transmit packet */ owner_bit = (ring->prod & ring->size) ? cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0; index = ring->prod & ring->size_mask; tx_desc = (volatile struct mlx4_en_tx_desc *) (ring->buf + index * TXBB_SIZE); tx_info = &ring->tx_info[index]; dseg = &tx_desc->data; /* send a copy of the frame to the BPF listener, if any */ if (ifp != NULL && if_getbpf(ifp) != NULL) ETHER_BPF_MTAP(ifp, mb); /* get default flags */ tx_desc->ctrl.srcrb_flags = CTRL_FLAGS; if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM); if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM); /* do statistics */ if (likely(tx_desc->ctrl.srcrb_flags != CTRL_FLAGS)) { priv->port_stats.tx_chksum_offload++; ring->tx_csum++; } /* check for VLAN tag */ if (mb->m_flags & M_VLANTAG) { tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag); tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; } else { tx_desc->ctrl.vlan_tag = 0; tx_desc->ctrl.ins_vlan = 0; } if (unlikely(mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)) { /* * Copy destination MAC address to WQE. This allows * loopback in eSwitch, so that VFs and PF can * communicate with each other: */ m_copydata(mb, 0, 2, __DEVOLATILE(void *, &tx_desc->ctrl.srcrb_flags16[0])); m_copydata(mb, 2, 4, __DEVOLATILE(void *, &tx_desc->ctrl.imm)); } else { /* clear immediate field */ tx_desc->ctrl.imm = 0; } /* Handle LSO (TSO) packets */ if (mb->m_pkthdr.csum_flags & CSUM_TSO) { u32 payload_len; u32 mss = mb->m_pkthdr.tso_segsz; u32 num_pkts; opcode = cpu_to_be32(MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR) | owner_bit; ihs = mlx4_en_get_header_size(mb); if (unlikely(ihs > MAX_INLINE)) { ring->oversized_packets++; err = EINVAL; goto tx_drop; } tx_desc->lso.mss_hdr_size = cpu_to_be32((mss << 16) | ihs); payload_len = mb->m_pkthdr.len - ihs; if (unlikely(payload_len == 0)) num_pkts = 1; else num_pkts = DIV_ROUND_UP(payload_len, mss); ring->bytes += payload_len + (num_pkts * ihs); ring->packets += num_pkts; ring->tso_packets++; /* store pointer to inline header */ dseg_inline = dseg; /* copy data inline */ dseg = mlx4_en_store_inline_lso_data(dseg, mb, ihs, owner_bit); } else { opcode = cpu_to_be32(MLX4_OPCODE_SEND) | owner_bit; ihs = mlx4_en_get_inline_hdr_size(ring, mb); ring->bytes += max_t (unsigned int, mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN); ring->packets++; /* store pointer to inline header */ dseg_inline = dseg; /* copy data inline */ dseg = mlx4_en_store_inline_data(dseg, mb, ihs, owner_bit); } m_adj(mb, ihs); err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map, mb, segs, &nr_segs, BUS_DMA_NOWAIT); if (unlikely(err == EFBIG)) { /* Too many mbuf fragments */ ring->defrag_attempts++; m = m_defrag(mb, M_NOWAIT); if (m == NULL) { ring->oversized_packets++; goto tx_drop; } mb = m; /* Try again */ err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map, mb, segs, &nr_segs, BUS_DMA_NOWAIT); } /* catch errors */ if (unlikely(err != 0)) 
{ ring->oversized_packets++; goto tx_drop; } /* If there were no errors and we didn't load anything, don't sync. */ if (nr_segs != 0) { /* make sure all mbuf data is written to RAM */ bus_dmamap_sync(ring->dma_tag, tx_info->dma_map, BUS_DMASYNC_PREWRITE); } else { /* All data was inlined, free the mbuf. */ bus_dmamap_unload(ring->dma_tag, tx_info->dma_map); m_freem(mb); mb = NULL; } /* compute number of DS needed */ ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs; /* * Check if the next request can wrap around and fill the end * of the current request with zero immediate data: */ pad = DIV_ROUND_UP(ds_cnt, DS_FACT); pad = (~(ring->prod + pad)) & ring->size_mask; if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) { /* * Compute the least number of DS blocks we need to * pad in order to achieve a TX ring wraparound: */ pad = (DS_FACT * (pad + 1)); } else { /* * The hardware will automatically jump to the next * TXBB. No need for padding. */ pad = 0; } /* compute total number of DS blocks */ ds_cnt += pad; /* * When modifying this code, please ensure that the following * computation is always less than or equal to 0x3F: * * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) + * (MLX4_EN_TX_WQE_MAX_WQEBBS * DS_FACT) * * Else the "ds_cnt" variable can become too big. */ tx_desc->ctrl.fence_size = (ds_cnt & 0x3f); /* store pointer to mbuf */ tx_info->mb = mb; tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT); bf_size = ds_cnt * DS_SIZE_ALIGNMENT; bf_prod = ring->prod; /* compute end of "dseg" array */ dseg += nr_segs + pad; /* pad using zero immediate dseg */ while (pad--) { dseg--; dseg->addr = 0; dseg->lkey = 0; wmb(); dseg->byte_count = SET_BYTE_COUNT((1U << 31)|0); } /* fill segment list */ while (nr_segs--) { if (unlikely(segs[nr_segs].ds_len == 0)) { dseg--; dseg->addr = 0; dseg->lkey = 0; wmb(); dseg->byte_count = SET_BYTE_COUNT((1U << 31)|0); } else { dseg--; dseg->addr = cpu_to_be64((uint64_t)segs[nr_segs].ds_addr); dseg->lkey = cpu_to_be32(priv->mdev->mr.key); wmb(); dseg->byte_count = SET_BYTE_COUNT((uint32_t)segs[nr_segs].ds_len); } } wmb(); /* write owner bits in reverse order */ if ((opcode & cpu_to_be32(0x1F)) == cpu_to_be32(MLX4_OPCODE_LSO)) mlx4_en_store_inline_lso_header(dseg_inline, ihs, owner_bit); else mlx4_en_store_inline_header(dseg_inline, ihs, owner_bit); /* update producer counter */ ring->prod += tx_info->nr_txbb; if (ring->bf_enabled && bf_size <= MAX_BF && (tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_CVLAN)) { /* store doorbell number */ *(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); /* or in producer number for this WQE */ opcode |= cpu_to_be32((bf_prod & 0xffff) << 8); /* * Ensure the new descriptor hits memory before * setting ownership of this descriptor to HW: */ wmb(); tx_desc->ctrl.owner_opcode = opcode; wmb(); mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset, (volatile unsigned long *) &tx_desc->ctrl, bf_size); wmb(); ring->bf.offset ^= ring->bf.buf_size; } else { /* * Ensure the new descriptor hits memory before * setting ownership of this descriptor to HW: */ wmb(); tx_desc->ctrl.owner_opcode = opcode; wmb(); writel(cpu_to_be32(ring->doorbell_qpn), ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL); } return (0); tx_drop: *mbp = NULL; m_freem(mb); return (err); } static int mlx4_en_transmit_locked(if_t ifp, int tx_ind, struct mbuf *mb) { struct mlx4_en_priv *priv = mlx4_netdev_priv(ifp); struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; int err = 0; if (unlikely((if_getdrvflags(ifp) & 
IFF_DRV_RUNNING) == 0 || READ_ONCE(priv->port_up) == 0)) { m_freem(mb); return (ENETDOWN); } if (mlx4_en_xmit(priv, tx_ind, &mb) != 0) { /* NOTE: m_freem() is NULL safe */ m_freem(mb); err = ENOBUFS; if (ring->watchdog_time == 0) ring->watchdog_time = ticks + MLX4_EN_WATCHDOG_TIMEOUT; } else { ring->watchdog_time = 0; } return (err); } int mlx4_en_transmit(if_t dev, struct mbuf *m) { struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); struct mlx4_en_tx_ring *ring; int i, err = 0; if (priv->port_up == 0) { m_freem(m); return (ENETDOWN); } /* Compute which queue to use */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { i = (m->m_pkthdr.flowid % 128) % priv->tx_ring_num; } else { i = mlx4_en_select_queue(dev, m); } ring = priv->tx_ring[i]; spin_lock(&ring->tx_lock); err = mlx4_en_transmit_locked(dev, i, m); spin_unlock(&ring->tx_lock); /* Poll CQ here */ mlx4_en_xmit_poll(priv, i); if (unlikely(err != 0)) if_inc_counter(dev, IFCOUNTER_IQDROPS, 1); return (err); } /* * Flush ring buffers. */ void mlx4_en_qflush(if_t dev) { struct mlx4_en_priv *priv = mlx4_netdev_priv(dev); if (priv->port_up == 0) return; if_qflush(dev); }