Index: head/sys/compat/linuxkpi/common/include/asm/byteorder.h
===================================================================
--- head/sys/compat/linuxkpi/common/include/asm/byteorder.h	(revision 331432)
+++ head/sys/compat/linuxkpi/common/include/asm/byteorder.h	(revision 331433)
@@ -1,94 +1,94 @@
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _ASM_BYTEORDER_H_
#define _ASM_BYTEORDER_H_

#include <sys/types.h>
#include <sys/endian.h>
#include <asm/types.h>

#if BYTE_ORDER == LITTLE_ENDIAN
#define __LITTLE_ENDIAN
#else
#define __BIG_ENDIAN
#endif

#define cpu_to_le64(x)	htole64(x)
#define le64_to_cpu(x)	le64toh(x)
#define cpu_to_le32(x)	htole32(x)
#define le32_to_cpu(x)	le32toh(x)
#define cpu_to_le16(x)	htole16(x)
#define le16_to_cpu(x)	le16toh(x)
#define cpu_to_be64(x)	htobe64(x)
#define be64_to_cpu(x)	be64toh(x)
#define cpu_to_be32(x)	htobe32(x)
#define be32_to_cpu(x)	be32toh(x)
#define cpu_to_be16(x)	htobe16(x)
#define be16_to_cpu(x)	be16toh(x)
#define __be16_to_cpu(x)	be16toh(x)

#define cpu_to_le64p(x)	htole64(*((const uint64_t *)(x)))
#define le64_to_cpup(x)	le64toh(*((const uint64_t *)(x)))
#define cpu_to_le32p(x)	htole32(*((const uint32_t *)(x)))
#define le32_to_cpup(x)	le32toh(*((const uint32_t *)(x)))
#define cpu_to_le16p(x)	htole16(*((const uint16_t *)(x)))
#define le16_to_cpup(x)	le16toh(*((const uint16_t *)(x)))
#define cpu_to_be64p(x)	htobe64(*((const uint64_t *)(x)))
#define be64_to_cpup(x)	be64toh(*((const uint64_t *)(x)))
#define cpu_to_be32p(x)	htobe32(*((const uint32_t *)(x)))
#define be32_to_cpup(x)	be32toh(*((const uint32_t *)(x)))
#define cpu_to_be16p(x)	htobe16(*((const uint16_t *)(x)))
#define be16_to_cpup(x)	be16toh(*((const uint16_t *)(x)))

#define cpu_to_le64s(x)	do { *((uint64_t *)(x)) = cpu_to_le64p((x)); } while (0)
#define le64_to_cpus(x)	do { *((uint64_t *)(x)) = le64_to_cpup((x)); } while (0)
#define cpu_to_le32s(x)	do { *((uint32_t *)(x)) = cpu_to_le32p((x)); } while (0)
#define le32_to_cpus(x)	do { *((uint32_t *)(x)) = le32_to_cpup((x)); } while (0)
#define cpu_to_le16s(x)	do { *((uint16_t *)(x)) = cpu_to_le16p((x)); } while (0)
#define le16_to_cpus(x)	do { *((uint16_t *)(x)) = le16_to_cpup((x)); } while (0)
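An editorial aside, not part of this commit: a minimal sketch of how driver code typically consumes the three shim families above (value, pointer "p", and in-place "s" variants). The function and parameter names here are hypothetical.

	/* Hypothetical example only -- not from the FreeBSD sources. */
	static inline uint32_t
	demo_wire_to_host(uint32_t *len_le, const uint16_t *port_be)
	{
		uint32_t len;

		len = le32_to_cpup(len_le);		/* pointer ("p") variant */
		le32_to_cpus(len_le);			/* in-place ("s") variant */
		return (len + be16_to_cpu(*port_be));	/* value variant */
	}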
#define cpu_to_be64s(x) do { *((uint64_t *)(x)) = cpu_to_be64p((x)); } while (0) #define be64_to_cpus(x) do { *((uint64_t *)(x)) = be64_to_cpup((x)); } while (0) #define cpu_to_be32s(x) do { *((uint32_t *)(x)) = cpu_to_be32p((x)); } while (0) #define be32_to_cpus(x) do { *((uint32_t *)(x)) = be32_to_cpup((x)); } while (0) #define cpu_to_be16s(x) do { *((uint16_t *)(x)) = cpu_to_be16p((x)); } while (0) #define be16_to_cpus(x) do { *((uint16_t *)(x)) = be16_to_cpup((x)); } while (0) #define swab16(x) bswap16(x) #define swab32(x) bswap32(x) #define swab64(x) bswap64(x) static inline void be16_add_cpu(uint16_t *var, uint16_t val) -{ +{ *var = cpu_to_be16(be16_to_cpu(*var) + val); } #endif /* _ASM_BYTEORDER_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/bitops.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/bitops.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/bitops.h (revision 331433) @@ -1,406 +1,406 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _LINUX_BITOPS_H_ #define _LINUX_BITOPS_H_ #include #include #include #include #include #define BIT(nr) (1UL << (nr)) #define BIT_ULL(nr) (1ULL << (nr)) #ifdef __LP64__ #define BITS_PER_LONG 64 #else #define BITS_PER_LONG 32 #endif #define BITS_PER_LONG_LONG 64 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) #define BITMAP_LAST_WORD_MASK(n) (~0UL >> (BITS_PER_LONG - (n))) #define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG) #define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1))) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) #define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l))) #define GENMASK_ULL(h, l) (((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l))) -#define BITS_PER_BYTE 8 +#define BITS_PER_BYTE 8 #define hweight8(x) bitcount((uint8_t)(x)) #define hweight16(x) bitcount16(x) #define hweight32(x) bitcount32(x) #define hweight64(x) bitcount64(x) #define hweight_long(x) bitcountl(x) static inline int __ffs(int mask) { return (ffs(mask) - 1); } static inline int __fls(int mask) { return (fls(mask) - 1); } static inline int __ffsl(long mask) { return (ffsl(mask) - 1); } static inline int __flsl(long mask) { return (flsl(mask) - 1); } static inline int fls64(uint64_t mask) { return (flsll(mask)); } static inline uint32_t ror32(uint32_t word, unsigned int shift) { return ((word >> shift) | (word << (32 - shift))); } #define ffz(mask) __ffs(~(mask)) static inline int get_count_order(unsigned int count) { int order; order = fls(count) - 1; if (count & (count - 1)) order++; return order; } static inline unsigned long find_first_bit(const unsigned long *addr, unsigned long size) { long mask; int bit; for (bit = 0; size >= BITS_PER_LONG; size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) { if (*addr == 0) continue; return (bit + __ffsl(*addr)); } if (size) { mask = (*addr) & BITMAP_LAST_WORD_MASK(size); if (mask) bit += __ffsl(mask); else bit += size; } return (bit); } static inline unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) { long mask; int bit; for (bit = 0; size >= BITS_PER_LONG; size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) { if (~(*addr) == 0) continue; return (bit + __ffsl(~(*addr))); } if (size) { mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size); if (mask) bit += __ffsl(mask); else bit += size; } return (bit); } static inline unsigned long find_last_bit(const unsigned long *addr, unsigned long size) { long mask; int offs; int bit; int pos; pos = size / BITS_PER_LONG; offs = size % BITS_PER_LONG; bit = BITS_PER_LONG * pos; addr += pos; if (offs) { mask = (*addr) & BITMAP_LAST_WORD_MASK(offs); if (mask) return (bit + __flsl(mask)); } while (pos--) { addr--; bit -= BITS_PER_LONG; if (*addr) return (bit + __flsl(*addr)); } return (size); } static inline unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { long mask; int offs; int bit; int pos; if (offset >= size) return (size); pos = offset / BITS_PER_LONG; offs = offset % BITS_PER_LONG; bit = BITS_PER_LONG * pos; addr += pos; if (offs) { mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs); if (mask) return (bit + __ffsl(mask)); if (size - bit <= BITS_PER_LONG) return (size); bit += BITS_PER_LONG; addr++; } for (size -= bit; size >= BITS_PER_LONG; size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) { if (*addr == 0) continue; return (bit + __ffsl(*addr)); } if (size) { mask = (*addr) & BITMAP_LAST_WORD_MASK(size); if (mask) bit += __ffsl(mask); else bit += size; } return 
(bit); } static inline unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { long mask; int offs; int bit; int pos; if (offset >= size) return (size); pos = offset / BITS_PER_LONG; offs = offset % BITS_PER_LONG; bit = BITS_PER_LONG * pos; addr += pos; if (offs) { mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs); if (mask) return (bit + __ffsl(mask)); if (size - bit <= BITS_PER_LONG) return (size); bit += BITS_PER_LONG; addr++; } for (size -= bit; size >= BITS_PER_LONG; size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) { if (~(*addr) == 0) continue; return (bit + __ffsl(~(*addr))); } if (size) { mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size); if (mask) bit += __ffsl(mask); else bit += size; } return (bit); } #define __set_bit(i, a) \ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i)) #define set_bit(i, a) \ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i)) #define __clear_bit(i, a) \ atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i)) #define clear_bit(i, a) \ atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i)) #define test_bit(i, a) \ !!(READ_ONCE(((volatile unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i)) static inline int test_and_clear_bit(long bit, volatile unsigned long *var) { long val; var += BIT_WORD(bit); bit %= BITS_PER_LONG; bit = (1UL << bit); do { val = *var; } while (atomic_cmpset_long(var, val, val & ~bit) == 0); return !!(val & bit); } static inline int __test_and_clear_bit(long bit, volatile unsigned long *var) { long val; var += BIT_WORD(bit); bit %= BITS_PER_LONG; bit = (1UL << bit); val = *var; *var &= ~bit; return !!(val & bit); } static inline int test_and_set_bit(long bit, volatile unsigned long *var) { long val; var += BIT_WORD(bit); bit %= BITS_PER_LONG; bit = (1UL << bit); do { val = *var; } while (atomic_cmpset_long(var, val, val | bit) == 0); return !!(val & bit); } static inline int __test_and_set_bit(long bit, volatile unsigned long *var) { long val; var += BIT_WORD(bit); bit %= BITS_PER_LONG; bit = (1UL << bit); val = *var; *var |= bit; return !!(val & bit); } enum { REG_OP_ISFREE, REG_OP_ALLOC, REG_OP_RELEASE, }; static inline int linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op) { int nbits_reg; int index; int offset; int nlongs_reg; int nbitsinlong; unsigned long mask; int i; int ret = 0; nbits_reg = 1 << order; index = pos / BITS_PER_LONG; offset = pos - (index * BITS_PER_LONG); nlongs_reg = BITS_TO_LONGS(nbits_reg); nbitsinlong = min(nbits_reg, BITS_PER_LONG); mask = (1UL << (nbitsinlong - 1)); mask += mask - 1; mask <<= offset; switch (reg_op) { case REG_OP_ISFREE: for (i = 0; i < nlongs_reg; i++) { if (bitmap[index + i] & mask) goto done; } ret = 1; break; case REG_OP_ALLOC: for (i = 0; i < nlongs_reg; i++) bitmap[index + i] |= mask; break; case REG_OP_RELEASE: for (i = 0; i < nlongs_reg; i++) bitmap[index + i] &= ~mask; break; } done: return ret; } #define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) #define for_each_clear_bit(bit, addr, size) \ for ((bit) = find_first_zero_bit((addr), (size)); \ (bit) < (size); \ (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) static inline uint64_t sign_extend64(uint64_t value, int index) { uint8_t shift = 63 - index; return ((int64_t)(value << shift) >> shift); } #endif /* _LINUX_BITOPS_H_ */ Index: 
head/sys/compat/linuxkpi/common/include/linux/cdev.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/cdev.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/cdev.h (revision 331433) @@ -1,147 +1,147 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_CDEV_H_ #define _LINUX_CDEV_H_ #include #include #include #include struct file_operations; struct inode; struct module; extern struct cdevsw linuxcdevsw; extern const struct kobj_type linux_cdev_ktype; extern const struct kobj_type linux_cdev_static_ktype; struct linux_cdev { struct kobject kobj; struct module *owner; struct cdev *cdev; dev_t dev; const struct file_operations *ops; }; static inline void cdev_init(struct linux_cdev *cdev, const struct file_operations *ops) { kobject_init(&cdev->kobj, &linux_cdev_static_ktype); cdev->ops = ops; } static inline struct linux_cdev * cdev_alloc(void) { struct linux_cdev *cdev; cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); if (cdev) kobject_init(&cdev->kobj, &linux_cdev_ktype); return (cdev); } static inline void cdev_put(struct linux_cdev *p) { kobject_put(&p->kobj); } static inline int cdev_add(struct linux_cdev *cdev, dev_t dev, unsigned count) { struct make_dev_args args; int error; if (count != 1) return (-EINVAL); cdev->dev = dev; /* Setup arguments for make_dev_s() */ make_dev_args_init(&args); args.mda_devsw = &linuxcdevsw; args.mda_uid = 0; args.mda_gid = 0; args.mda_mode = 0700; args.mda_si_drv1 = cdev; error = make_dev_s(&args, &cdev->cdev, "%s", kobject_name(&cdev->kobj)); if (error) return (-error); kobject_get(cdev->kobj.parent); return (0); } static inline int cdev_add_ext(struct linux_cdev *cdev, dev_t dev, uid_t uid, gid_t gid, int mode) { struct make_dev_args args; int error; cdev->dev = dev; - + /* Setup arguments for make_dev_s() */ make_dev_args_init(&args); args.mda_devsw = &linuxcdevsw; args.mda_uid = uid; args.mda_gid = gid; args.mda_mode = mode; args.mda_si_drv1 = cdev; error = make_dev_s(&args, &cdev->cdev, "%s/%d", kobject_name(&cdev->kobj), MINOR(dev)); if (error) return (-error); 
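/*
	 * Editorial note: make_dev_s() reports a plain (positive) FreeBSD
	 * errno, so it is negated above to match the Linux convention of
	 * negative error returns; on success an extra reference is held on
	 * the parent kobject for the lifetime of the character device.
	 */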
kobject_get(cdev->kobj.parent); return (0); } static inline void cdev_del(struct linux_cdev *cdev) { if (cdev->cdev) { destroy_dev(cdev->cdev); cdev->cdev = NULL; } kobject_put(&cdev->kobj); } struct linux_cdev *linux_find_cdev(const char *name, unsigned major, unsigned minor); #define cdev linux_cdev #endif /* _LINUX_CDEV_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/compiler.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/compiler.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/compiler.h (revision 331433) @@ -1,107 +1,107 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * Copyright (c) 2015 François Tigeot * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_COMPILER_H_ #define _LINUX_COMPILER_H_ #include #define __user #define __kernel #define __safe #define __force #define __nocast #define __iomem #define __chk_user_ptr(x) ((void)0) #define __chk_io_ptr(x) ((void)0) #define __builtin_warning(x, y...) (1) #define __acquires(x) #define __releases(x) #define __acquire(x) do { } while (0) #define __release(x) do { } while (0) #define __cond_lock(x,c) (c) #define __bitwise #define __devinitdata #define __deprecated #define __init #define __devinit #define __devexit #define __exit #define __rcu #define __percpu #define __weak __weak_symbol #define __malloc #define ___stringify(...) #__VA_ARGS__ #define __stringify(...) 
___stringify(__VA_ARGS__) #define __attribute_const__ __attribute__((__const__)) #undef __always_inline #define __always_inline inline #define noinline __noinline #define ____cacheline_aligned __aligned(CACHE_LINE_SIZE) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define typeof(x) __typeof(x) #define uninitialized_var(x) x = x #define __maybe_unused __unused #define __always_unused __unused #define __must_check __result_use_check #define __printf(a,b) __printflike(a,b) #define barrier() __asm__ __volatile__("": : :"memory") #define ___PASTE(a,b) a##b #define __PASTE(a,b) ___PASTE(a,b) #define ACCESS_ONCE(x) (*(volatile __typeof(x) *)&(x)) - + #define WRITE_ONCE(x,v) do { \ barrier(); \ ACCESS_ONCE(x) = (v); \ barrier(); \ } while (0) #define READ_ONCE(x) ({ \ __typeof(x) __var = ({ \ barrier(); \ ACCESS_ONCE(x); \ }); \ barrier(); \ __var; \ }) #define lockless_dereference(p) READ_ONCE(p) #define _AT(T,X) ((T)(X)) #endif /* _LINUX_COMPILER_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/device.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/device.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/device.h (revision 331433) @@ -1,542 +1,542 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _LINUX_DEVICE_H_ #define _LINUX_DEVICE_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include struct device; struct fwnode_handle; struct class { const char *name; struct module *owner; struct kobject kobj; devclass_t bsdclass; const struct dev_pm_ops *pm; void (*class_release)(struct class *class); void (*dev_release)(struct device *dev); char * (*devnode)(struct device *dev, umode_t *mode); }; struct dev_pm_ops { int (*suspend)(struct device *dev); int (*suspend_late)(struct device *dev); int (*resume)(struct device *dev); int (*resume_early)(struct device *dev); int (*freeze)(struct device *dev); int (*freeze_late)(struct device *dev); int (*thaw)(struct device *dev); int (*thaw_early)(struct device *dev); int (*poweroff)(struct device *dev); int (*poweroff_late)(struct device *dev); int (*restore)(struct device *dev); int (*restore_early)(struct device *dev); int (*runtime_suspend)(struct device *dev); int (*runtime_resume)(struct device *dev); int (*runtime_idle)(struct device *dev); }; struct device_driver { const char *name; const struct dev_pm_ops *pm; }; struct device_type { const char *name; }; struct device { struct device *parent; struct list_head irqents; device_t bsddev; /* * The following flag is used to determine if the LinuxKPI is * responsible for detaching the BSD device or not. If the * LinuxKPI got the BSD device using devclass_get_device(), it * must not try to detach or delete it, because it's already * done somewhere else. */ bool bsddev_attached_here; struct device_driver *driver; struct device_type *type; dev_t devt; struct class *class; void (*release)(struct device *dev); struct kobject kobj; uint64_t *dma_mask; void *driver_data; unsigned int irq; #define LINUX_IRQ_INVALID 65535 unsigned int msix; unsigned int msix_max; const struct attribute_group **groups; struct fwnode_handle *fwnode; spinlock_t devres_lock; struct list_head devres_head; }; extern struct device linux_root_device; extern struct kobject linux_class_root; extern const struct kobj_type linux_dev_ktype; extern const struct kobj_type linux_class_ktype; struct class_attribute { - struct attribute attr; - ssize_t (*show)(struct class *, struct class_attribute *, char *); - ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t); - const void *(*namespace)(struct class *, const struct class_attribute *); + struct attribute attr; + ssize_t (*show)(struct class *, struct class_attribute *, char *); + ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t); + const void *(*namespace)(struct class *, const struct class_attribute *); }; #define CLASS_ATTR(_name, _mode, _show, _store) \ struct class_attribute class_attr_##_name = \ { { #_name, NULL, _mode }, _show, _store } struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); }; #define DEVICE_ATTR(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = \ __ATTR(_name, _mode, _show, _store) #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RO(_name) #define DEVICE_ATTR_WO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_WO(_name) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) /* Simple class attribute that is just a static string */ struct class_attribute_string { 
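/*
	 * Editorial note: pairs a generic class_attribute with the fixed
	 * string that show_class_attr_string() below copies out via
	 * snprintf().
	 */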
struct class_attribute attr; char *str; }; static inline ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, char *buf) { struct class_attribute_string *cs; cs = container_of(attr, struct class_attribute_string, attr); return snprintf(buf, PAGE_SIZE, "%s\n", cs->str); } /* Currently read-only only */ #define _CLASS_ATTR_STRING(_name, _mode, _str) \ { __ATTR(_name, _mode, show_class_attr_string, NULL), _str } #define CLASS_ATTR_STRING(_name, _mode, _str) \ struct class_attribute_string class_attr_##_name = \ _CLASS_ATTR_STRING(_name, _mode, _str) #define dev_err(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_warn(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_info(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_notice(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_dbg(dev, fmt, ...) do { } while (0) #define dev_printk(lvl, dev, fmt, ...) \ device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_err_ratelimited(dev, ...) do { \ static linux_ratelimit_t __ratelimited; \ if (linux_ratelimited(&__ratelimited)) \ dev_err(dev, __VA_ARGS__); \ } while (0) #define dev_warn_ratelimited(dev, ...) do { \ static linux_ratelimit_t __ratelimited; \ if (linux_ratelimited(&__ratelimited)) \ dev_warn(dev, __VA_ARGS__); \ } while (0) static inline void * dev_get_drvdata(const struct device *dev) { return dev->driver_data; } static inline void dev_set_drvdata(struct device *dev, void *data) { dev->driver_data = data; } static inline struct device * get_device(struct device *dev) { if (dev) kobject_get(&dev->kobj); return (dev); } static inline char * dev_name(const struct device *dev) { - return kobject_name(&dev->kobj); + return kobject_name(&dev->kobj); } #define dev_set_name(_dev, _fmt, ...) \ kobject_set_name(&(_dev)->kobj, (_fmt), ##__VA_ARGS__) static inline void put_device(struct device *dev) { if (dev) kobject_put(&dev->kobj); } static inline int class_register(struct class *class) { class->bsdclass = devclass_create(class->name); kobject_init(&class->kobj, &linux_class_ktype); kobject_set_name(&class->kobj, class->name); kobject_add(&class->kobj, &linux_class_root, class->name); return (0); } static inline void class_unregister(struct class *class) { kobject_put(&class->kobj); } static inline struct device *kobj_to_dev(struct kobject *kobj) { return container_of(kobj, struct device, kobj); } /* * Devices are registered and created for exporting to sysfs. Create * implies register and register assumes the device fields have been * setup appropriately before being called. 
*/ static inline void device_initialize(struct device *dev) { device_t bsddev = NULL; int unit = -1; if (dev->devt) { unit = MINOR(dev->devt); bsddev = devclass_get_device(dev->class->bsdclass, unit); dev->bsddev_attached_here = false; } else if (dev->parent == NULL) { bsddev = devclass_get_device(dev->class->bsdclass, 0); dev->bsddev_attached_here = false; } else { dev->bsddev_attached_here = true; } if (bsddev == NULL && dev->parent != NULL) { bsddev = device_add_child(dev->parent->bsddev, dev->class->kobj.name, unit); } if (bsddev != NULL) device_set_softc(bsddev, dev); dev->bsddev = bsddev; MPASS(dev->bsddev != NULL); kobject_init(&dev->kobj, &linux_dev_ktype); spin_lock_init(&dev->devres_lock); INIT_LIST_HEAD(&dev->devres_head); } static inline int device_add(struct device *dev) { if (dev->bsddev != NULL) { if (dev->devt == 0) dev->devt = makedev(0, device_get_unit(dev->bsddev)); } kobject_add(&dev->kobj, &dev->class->kobj, dev_name(dev)); return (0); } static inline void device_create_release(struct device *dev) { kfree(dev); } static inline struct device * device_create_groups_vargs(struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args) { struct device *dev = NULL; int retval = -ENODEV; if (class == NULL || IS_ERR(class)) goto error; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto error; } dev->devt = devt; dev->class = class; dev->parent = parent; dev->groups = groups; dev->release = device_create_release; /* device_initialize() needs the class and parent to be set */ device_initialize(dev); dev_set_drvdata(dev, drvdata); retval = kobject_set_name_vargs(&dev->kobj, fmt, args); if (retval) goto error; retval = device_add(dev); if (retval) goto error; return dev; error: put_device(dev); return ERR_PTR(retval); } static inline struct device * device_create_with_groups(struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, ...) 
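/*
 * Editorial note: varargs front end for device_create_groups_vargs()
 * above; it formats the device name and performs creation and
 * registration in a single call.
 */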
{ va_list vargs; struct device *dev; va_start(vargs, fmt); dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, fmt, vargs); va_end(vargs); return dev; } static inline bool device_is_registered(struct device *dev) { return (dev->bsddev != NULL); } static inline int device_register(struct device *dev) { device_t bsddev = NULL; int unit = -1; if (device_is_registered(dev)) goto done; if (dev->devt) { unit = MINOR(dev->devt); bsddev = devclass_get_device(dev->class->bsdclass, unit); dev->bsddev_attached_here = false; } else if (dev->parent == NULL) { bsddev = devclass_get_device(dev->class->bsdclass, 0); dev->bsddev_attached_here = false; } else { dev->bsddev_attached_here = true; } if (bsddev == NULL && dev->parent != NULL) { bsddev = device_add_child(dev->parent->bsddev, dev->class->kobj.name, unit); } if (bsddev != NULL) { if (dev->devt == 0) dev->devt = makedev(0, device_get_unit(bsddev)); device_set_softc(bsddev, dev); } dev->bsddev = bsddev; done: kobject_init(&dev->kobj, &linux_dev_ktype); kobject_add(&dev->kobj, &dev->class->kobj, dev_name(dev)); return (0); } static inline void device_unregister(struct device *dev) { device_t bsddev; bsddev = dev->bsddev; dev->bsddev = NULL; if (bsddev != NULL && dev->bsddev_attached_here) { mtx_lock(&Giant); device_delete_child(device_get_parent(bsddev), bsddev); mtx_unlock(&Giant); } put_device(dev); } static inline void device_del(struct device *dev) { device_t bsddev; bsddev = dev->bsddev; dev->bsddev = NULL; if (bsddev != NULL && dev->bsddev_attached_here) { mtx_lock(&Giant); device_delete_child(device_get_parent(bsddev), bsddev); mtx_unlock(&Giant); } } struct device *device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...); static inline void device_destroy(struct class *class, dev_t devt) { device_t bsddev; int unit; unit = MINOR(devt); bsddev = devclass_get_device(class->bsdclass, unit); if (bsddev != NULL) device_unregister(device_get_softc(bsddev)); } static inline void linux_class_kfree(struct class *class) { kfree(class); } static inline struct class * class_create(struct module *owner, const char *name) { struct class *class; int error; class = kzalloc(sizeof(*class), M_WAITOK); class->owner = owner; class->name = name; class->class_release = linux_class_kfree; error = class_register(class); if (error) { kfree(class); return (NULL); } return (class); } static inline void class_destroy(struct class *class) { if (class == NULL) return; class_unregister(class); } static inline int device_create_file(struct device *dev, const struct device_attribute *attr) { if (dev) return sysfs_create_file(&dev->kobj, &attr->attr); return -EINVAL; } static inline void device_remove_file(struct device *dev, const struct device_attribute *attr) { if (dev) sysfs_remove_file(&dev->kobj, &attr->attr); } static inline int class_create_file(struct class *class, const struct class_attribute *attr) { if (class) return sysfs_create_file(&class->kobj, &attr->attr); return -EINVAL; } static inline void class_remove_file(struct class *class, const struct class_attribute *attr) { if (class) sysfs_remove_file(&class->kobj, &attr->attr); } static inline int dev_to_node(struct device *dev) { - return -1; + return -1; } char *kvasprintf(gfp_t, const char *, va_list); char *kasprintf(gfp_t, const char *, ...); #endif /* _LINUX_DEVICE_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/dma-attrs.h =================================================================== --- 
head/sys/compat/linuxkpi/common/include/linux/dma-attrs.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/dma-attrs.h (revision 331433) @@ -1,50 +1,50 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_DMA_ATTR_H_ #define _LINUX_DMA_ATTR_H_ enum dma_attr { DMA_ATTR_WRITE_BARRIER, DMA_ATTR_WEAK_ORDERING, DMA_ATTR_MAX, }; #define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX) struct dma_attrs { unsigned long flags; }; - + #define DEFINE_DMA_ATTRS(x) struct dma_attrs x = { } static inline void init_dma_attrs(struct dma_attrs *attrs) { attrs->flags = 0; } #endif /* _LINUX_DMA_ATTR_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h (revision 331433) @@ -1,282 +1,282 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_DMA_MAPPING_H_ #define _LINUX_DMA_MAPPING_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3, }; struct dma_map_ops { void* (*alloc_coherent)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); void (*free_coherent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); dma_addr_t (*map_page)(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); void (*sync_single_for_device)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); void (*sync_single_range_for_cpu)(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction dir); void (*sync_single_range_for_device)(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction dir); void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir); void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir); int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); int (*dma_supported)(struct device *dev, u64 mask); int is_phys; }; #define DMA_BIT_MASK(n) ((2ULL << ((n) - 1)) - 1ULL) static inline int dma_supported(struct device *dev, u64 mask) { /* XXX busdma takes care of this elsewhere. */ return (1); } - + static inline int dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; *dev->dma_mask = dma_mask; return (0); } static inline int dma_set_coherent_mask(struct device *dev, u64 mask) { if (!dma_supported(dev, mask)) return -EIO; /* XXX Currently we don't support a separate coherent mask. 
*/ return 0; } static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { vm_paddr_t high; size_t align; void *mem; if (dev != NULL && dev->dma_mask) high = *dev->dma_mask; else if (flag & GFP_DMA32) high = BUS_SPACE_MAXADDR_32BIT; else high = BUS_SPACE_MAXADDR; align = PAGE_SIZE << get_order(size); mem = (void *)kmem_alloc_contig(kmem_arena, size, flag, 0, high, align, 0, VM_MEMATTR_DEFAULT); if (mem) *dma_handle = vtophys(mem); else *dma_handle = 0; return (mem); } static inline void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { return (dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO)); } - + static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { kmem_free(kmem_arena, (vm_offset_t)cpu_addr, size); } /* XXX This only works with no iommu. */ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { return vtophys(ptr); } static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { } static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; - + for_each_sg(sgl, sg, nents, i) sg_dma_address(sg) = sg_phys(sg); return (nents); } static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { } - + static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { return VM_PAGE_TO_PHYS(page) + offset; } static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_single(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_cpu(dev, addr, size, dir); } static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { } static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { } static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { } static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return (0); } static inline unsigned int dma_set_max_seg_size(struct device *dev, - unsigned int size) + unsigned int size) { - return (0); + return (0); } #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) #define DEFINE_DMA_UNMAP_ADDR(name) 
dma_addr_t name #define DEFINE_DMA_UNMAP_LEN(name) __u32 name #define dma_unmap_addr(p, name) ((p)->name) #define dma_unmap_addr_set(p, name, v) (((p)->name) = (v)) #define dma_unmap_len(p, name) ((p)->name) #define dma_unmap_len_set(p, name, v) (((p)->name) = (v)) extern int uma_align_cache; #define dma_get_cache_alignment() uma_align_cache #endif /* _LINUX_DMA_MAPPING_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/err.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/err.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/err.h (revision 331433) @@ -1,81 +1,81 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_ERR_H_ #define _LINUX_ERR_H_ #include #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) static inline void * ERR_PTR(long error) { return (void *)error; } static inline long PTR_ERR(const void *ptr) { return (long)ptr; } static inline long IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); } static inline long IS_ERR_OR_NULL(const void *ptr) { return !ptr || IS_ERR_VALUE((unsigned long)ptr); } static inline void * ERR_CAST(void *ptr) { return (void *)ptr; } static inline int PTR_ERR_OR_ZERO(const void *ptr) { - if (IS_ERR(ptr)) - return PTR_ERR(ptr); - else - return 0; + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + else + return 0; } #define PTR_RET(p) PTR_ERR_OR_ZERO(p) #endif /* _LINUX_ERR_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/errno.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/errno.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/errno.h (revision 331433) @@ -1,60 +1,60 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_ERRNO_H_ #define _LINUX_ERRNO_H_ #include #define ECHRNG EDOM #define ETIME ETIMEDOUT -#define ECOMM ESTALE -#define ENODATA ECONNREFUSED +#define ECOMM ESTALE +#define ENODATA ECONNREFUSED #define ENOIOCTLCMD ENOIOCTL /* Use same value as Linux, because BSD's ERESTART is negative */ #define ERESTARTSYS 512 -#define ENOTSUPP EOPNOTSUPP -#define ENONET EHOSTDOWN +#define ENOTSUPP EOPNOTSUPP +#define ENONET EHOSTDOWN #define ERESTARTNOINTR 513 #define ERESTARTNOHAND 514 #define ERESTART_RESTARTBLOCK 516 #define EPROBE_DEFER 517 #define EOPENSTALE 518 #define EBADHANDLE 521 #define ENOTSYNC 522 #define EBADCOOKIE 523 #define ETOOSMALL 525 #define ESERVERFAULT 526 #define EBADTYPE 527 #define EJUKEBOX 528 #define EIOCBQUEUED 529 #endif /* _LINUX_ERRNO_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/etherdevice.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/etherdevice.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/etherdevice.h (revision 331433) @@ -1,118 +1,118 @@ /*- * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _LINUX_ETHERDEVICE #define _LINUX_ETHERDEVICE #include #include #include #define ETH_MODULE_SFF_8079 1 #define ETH_MODULE_SFF_8079_LEN 256 #define ETH_MODULE_SFF_8472 2 #define ETH_MODULE_SFF_8472_LEN 512 #define ETH_MODULE_SFF_8636 3 #define ETH_MODULE_SFF_8636_LEN 256 #define ETH_MODULE_SFF_8436 4 #define ETH_MODULE_SFF_8436_LEN 256 struct ethtool_eeprom { u32 offset; u32 len; }; struct ethtool_modinfo { u32 type; u32 eeprom_len; }; -static inline bool +static inline bool is_zero_ether_addr(const u8 * addr) { return ((addr[0] + addr[1] + addr[2] + addr[3] + addr[4] + addr[5]) == 0x00); } -static inline bool +static inline bool is_multicast_ether_addr(const u8 * addr) { return (0x01 & addr[0]); } -static inline bool +static inline bool is_broadcast_ether_addr(const u8 * addr) { return ((addr[0] + addr[1] + addr[2] + addr[3] + addr[4] + addr[5]) == (6 * 0xff)); } -static inline bool +static inline bool is_valid_ether_addr(const u8 * addr) { return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); } -static inline void +static inline void ether_addr_copy(u8 * dst, const u8 * src) { memcpy(dst, src, 6); } static inline bool ether_addr_equal(const u8 *pa, const u8 *pb) { return (memcmp(pa, pb, 6) == 0); } static inline bool ether_addr_equal_64bits(const u8 *pa, const u8 *pb) { return (memcmp(pa, pb, 6) == 0); } static inline void eth_broadcast_addr(u8 *pa) { memset(pa, 0xff, 6); } static inline void eth_zero_addr(u8 *pa) { memset(pa, 0, 6); } static inline void random_ether_addr(u8 * dst) { if (read_random(dst, 6) == 0) arc4rand(dst, 6, 0); dst[0] &= 0xfe; dst[0] |= 0x02; } #endif /* _LINUX_ETHERDEVICE */ Index: head/sys/compat/linuxkpi/common/include/linux/fs.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/fs.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/fs.h (revision 331433) @@ -1,322 +1,322 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _LINUX_FS_H_ #define _LINUX_FS_H_ #include #include #include #include #include #include #include #include #include #include #include #include struct module; struct kiocb; struct iovec; struct dentry; struct page; struct file_lock; struct pipe_inode_info; struct vm_area_struct; struct poll_table_struct; struct files_struct; struct pfs_node; #define inode vnode #define i_cdev v_rdev #define i_private v_data #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH) #define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH) typedef struct files_struct *fl_owner_t; struct file_operations; struct linux_file_wait_queue { struct wait_queue wq; struct wait_queue_head *wqh; atomic_t state; #define LINUX_FWQ_STATE_INIT 0 #define LINUX_FWQ_STATE_NOT_READY 1 #define LINUX_FWQ_STATE_QUEUED 2 #define LINUX_FWQ_STATE_READY 3 #define LINUX_FWQ_STATE_MAX 4 }; struct linux_file { struct file *_file; const struct file_operations *f_op; - void *private_data; + void *private_data; int f_flags; int f_mode; /* Just starting mode. */ struct dentry *f_dentry; struct dentry f_dentry_store; struct selinfo f_selinfo; struct sigio *f_sigio; struct vnode *f_vnode; #define f_inode f_vnode volatile u_int f_count; /* anonymous shmem object */ vm_object_t f_shmem; /* kqfilter support */ int f_kqflags; #define LINUX_KQ_FLAG_HAS_READ (1 << 0) #define LINUX_KQ_FLAG_HAS_WRITE (1 << 1) #define LINUX_KQ_FLAG_NEED_READ (1 << 2) #define LINUX_KQ_FLAG_NEED_WRITE (1 << 3) /* protects f_selinfo.si_note */ spinlock_t f_kqlock; struct linux_file_wait_queue f_wait_queue; }; #define file linux_file #define fasync_struct sigio * #define fasync_helper(fd, filp, on, queue) \ ({ \ if ((on)) \ *(queue) = &(filp)->f_sigio; \ else \ *(queue) = NULL; \ 0; \ }) #define kill_fasync(queue, sig, pollstat) \ do { \ if (*(queue) != NULL) \ pgsigio(*(queue), (sig), 0); \ } while (0) typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); struct file_operations { struct module *owner; ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); unsigned int (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*release)(struct inode *, struct file *); int (*fasync)(int, struct file *, int); /* Although not supported in FreeBSD, to align with Linux code - * we are adding llseek() only when it is mapped to no_llseek which returns + * we are adding llseek() only when it is mapped to no_llseek which returns * an illegal seek error */ loff_t (*llseek)(struct file *, loff_t, int); #if 0 /* We do not support these methods. Don't permit them to compile. 
*/ loff_t (*llseek)(struct file *, loff_t, int); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t); int (*readdir)(struct file *, void *, filldir_t); int (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long); int (*flush)(struct file *, fl_owner_t id); int (*fsync)(struct file *, struct dentry *, int datasync); int (*aio_fsync)(struct kiocb *, int datasync); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); int (*setlease)(struct file *, long, struct file_lock **); #endif }; #define fops_get(fops) (fops) #define replace_fops(f, fops) ((f)->f_op = (fops)) #define FMODE_READ FREAD #define FMODE_WRITE FWRITE #define FMODE_EXEC FEXEC int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops); int __register_chrdev_p(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops, uid_t uid, gid_t gid, int mode); void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name); static inline void unregister_chrdev(unsigned int major, const char *name) { __unregister_chrdev(major, 0, 256, name); } static inline int register_chrdev(unsigned int major, const char *name, const struct file_operations *fops) { return (__register_chrdev(major, 0, 256, name, fops)); } static inline int register_chrdev_p(unsigned int major, const char *name, const struct file_operations *fops, uid_t uid, gid_t gid, int mode) { return (__register_chrdev_p(major, 0, 256, name, fops, uid, gid, mode)); } static inline int register_chrdev_region(dev_t dev, unsigned range, const char *name) { return 0; } static inline void unregister_chrdev_region(dev_t dev, unsigned range) { return; } static inline int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count, const char *name) { return 0; } /* No current support for seek op in FreeBSD */ static inline int nonseekable_open(struct inode *inode, struct file *filp) { return 0; } extern unsigned int linux_iminor(struct inode *); #define iminor(...) linux_iminor(__VA_ARGS__) static inline struct linux_file * get_file(struct linux_file *f) { refcount_acquire(f->_file == NULL ? 
&f->f_count : &f->_file->f_count); return (f); } static inline struct inode * igrab(struct inode *inode) { int error; error = vget(inode, 0, curthread); if (error) return (NULL); return (inode); } static inline void iput(struct inode *inode) { vrele(inode); } -static inline loff_t +static inline loff_t no_llseek(struct file *file, loff_t offset, int whence) { return (-ESPIPE); } static inline loff_t noop_llseek(struct linux_file *file, loff_t offset, int whence) { return (file->_file->f_offset); } static inline struct vnode * file_inode(const struct linux_file *file) { return (file->f_vnode); } static inline int call_mmap(struct linux_file *file, struct vm_area_struct *vma) { return (file->f_op->mmap(file, vma)); } /* Shared memory support */ unsigned long linux_invalidate_mapping_pages(vm_object_t, pgoff_t, pgoff_t); struct page *linux_shmem_read_mapping_page_gfp(vm_object_t, int, gfp_t); struct linux_file *linux_shmem_file_setup(const char *, loff_t, unsigned long); void linux_shmem_truncate_range(vm_object_t, loff_t, loff_t); #define invalidate_mapping_pages(...) \ linux_invalidate_mapping_pages(__VA_ARGS__) #define shmem_read_mapping_page(...) \ linux_shmem_read_mapping_page_gfp(__VA_ARGS__, 0) #define shmem_read_mapping_page_gfp(...) \ linux_shmem_read_mapping_page_gfp(__VA_ARGS__) #define shmem_file_setup(...) \ linux_shmem_file_setup(__VA_ARGS__) #define shmem_truncate_range(...) \ linux_shmem_truncate_range(__VA_ARGS__) #endif /* _LINUX_FS_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/idr.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/idr.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/idr.h (revision 331433) @@ -1,127 +1,127 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
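A minimal sketch of how this shim is meant to be consumed, wiring a read-only file_operations into a character device; the function and device names, the error variable, and MY_MAJOR are hypothetical, and register_chrdev() covers minors 0 through 255 as defined above:

	static ssize_t
	dummy_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
	{
		return (0);		/* always report EOF */
	}

	static const struct file_operations dummy_fops = {
		.read	= dummy_read,
		.llseek	= no_llseek,	/* any seek fails with -ESPIPE */
	};

	error = register_chrdev(MY_MAJOR, "lkpi_dummy", &dummy_fops);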
* * $FreeBSD$ */ #ifndef _LINUX_IDR_H_ #define _LINUX_IDR_H_ #include #include #include #include #define IDR_BITS 5 #define IDR_SIZE (1 << IDR_BITS) #define IDR_MASK (IDR_SIZE - 1) #define MAX_ID_SHIFT ((sizeof(int) * NBBY) - 1) #define MAX_ID_BIT (1U << MAX_ID_SHIFT) #define MAX_ID_MASK (MAX_ID_BIT - 1) #define MAX_LEVEL (MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS #define MAX_IDR_SHIFT (sizeof(int)*8 - 1) #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) #define MAX_IDR_MASK (MAX_IDR_BIT - 1) struct idr_layer { unsigned long bitmap; struct idr_layer *ary[IDR_SIZE]; }; struct idr { struct mtx lock; struct idr_layer *top; struct idr_layer *free; int layers; int next_cyclic_id; }; /* NOTE: It is the applications responsibility to destroy the IDR */ #define DEFINE_IDR(name) \ struct idr name; \ SYSINIT(name##_idr_sysinit, SI_SUB_DRIVERS, SI_ORDER_FIRST, \ idr_init, &(name)) /* NOTE: It is the applications responsibility to destroy the IDA */ #define DEFINE_IDA(name) \ struct ida name; \ SYSINIT(name##_ida_sysinit, SI_SUB_DRIVERS, SI_ORDER_FIRST, \ ida_init, &(name)) void idr_preload(gfp_t gfp_mask); void idr_preload_end(void); void *idr_find(struct idr *idp, int id); void *idr_get_next(struct idr *idp, int *nextid); int idr_pre_get(struct idr *idp, gfp_t gfp_mask); int idr_get_new(struct idr *idp, void *ptr, int *id); int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); void *idr_replace(struct idr *idp, void *ptr, int id); void idr_remove(struct idr *idp, int id); void idr_remove_all(struct idr *idp); void idr_destroy(struct idr *idp); void idr_init(struct idr *idp); int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t); int idr_alloc_cyclic(struct idr *idp, void *ptr, int start, int end, gfp_t); int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data); #define idr_for_each_entry(idp, entry, id) \ for ((id) = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++(id)) #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ #define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) -#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) +#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) struct ida_bitmap { long nr_busy; unsigned long bitmap[IDA_BITMAP_LONGS]; }; struct ida { struct idr idr; struct ida_bitmap *free_bitmap; }; int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); void ida_remove(struct ida *ida, int id); void ida_destroy(struct ida *ida); void ida_init(struct ida *ida); int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t gfp_mask); void ida_simple_remove(struct ida *ida, unsigned int id); static inline int ida_get_new(struct ida *ida, int *p_id) { return (ida_get_new_above(ida, 0, p_id)); } #endif /* _LINUX_IDR_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/if_ether.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/if_ether.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/if_ether.h (revision 331433) @@ -1,55 +1,55 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_IF_ETHER_H_ #define _LINUX_IF_ETHER_H_ #include #include -#define ETH_HLEN ETHER_HDR_LEN /* Total octets in header. */ +#define ETH_HLEN ETHER_HDR_LEN /* Total octets in header. */ #ifndef ETH_ALEN -#define ETH_ALEN ETHER_ADDR_LEN +#define ETH_ALEN ETHER_ADDR_LEN #endif -#define ETH_FCS_LEN 4 /* Octets in the FCS */ -#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) - * that VLAN requires. */ +#define ETH_FCS_LEN 4 /* Octets in the FCS */ +#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) + * that VLAN requires. */ /* * defined Ethernet Protocol ID's. */ -#define ETH_P_IP ETHERTYPE_IP +#define ETH_P_IP ETHERTYPE_IP #define ETH_P_IPV6 ETHERTYPE_IPV6 #define ETH_P_MPLS_UC ETHERTYPE_MPLS #define ETH_P_MPLS_MC ETHERTYPE_MPLS_MCAST #define ETH_P_8021Q ETHERTYPE_VLAN #define ETH_P_8021AD ETHERTYPE_QINQ #endif /* _LINUX_IF_ETHER_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/if_vlan.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/if_vlan.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/if_vlan.h (revision 331433) @@ -1,59 +1,59 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
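These mappings make the usual Linux frame-size arithmetic come out right on FreeBSD, since ETHER_HDR_LEN is 14. An illustrative calculation (the 1500-byte MTU is just an example):

	unsigned untagged = ETH_HLEN + 1500 + ETH_FCS_LEN;		/* 1518 */
	unsigned tagged   = ETH_HLEN + VLAN_HLEN + 1500 + ETH_FCS_LEN;	/* 1522 */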
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_IF_VLAN_H_ #define _LINUX_IF_VLAN_H_ #include #include #include #include #include #include #include -#define VLAN_N_VID 4096 +#define VLAN_N_VID 4096 static inline int is_vlan_dev(struct ifnet *ifp) { return (ifp->if_type == IFT_L2VLAN); } static inline uint16_t vlan_dev_vlan_id(struct ifnet *ifp) { uint16_t vtag; if (VLAN_TAG(ifp, &vtag) == 0) return (vtag); return (0); } #endif /* _LINUX_IF_VLAN_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/io.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/io.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/io.h (revision 331433) @@ -1,276 +1,276 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2015 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_IO_H_ #define _LINUX_IO_H_ #include #include #include #include #include static inline uint32_t __raw_readl(const volatile void *addr) { return *(const volatile uint32_t *)addr; } static inline void __raw_writel(uint32_t b, volatile void *addr) { *(volatile uint32_t *)addr = b; } static inline uint64_t __raw_readq(const volatile void *addr) { return *(const volatile uint64_t *)addr; } static inline void __raw_writeq(uint64_t b, volatile void *addr) { *(volatile uint64_t *)addr = b; } /* * XXX This is all x86 specific. It should be bus space access. 
*/ #define mmiowb() barrier() #undef writel static inline void writel(uint32_t b, void *addr) { - *(volatile uint32_t *)addr = b; + *(volatile uint32_t *)addr = b; } #undef writel_relaxed static inline void writel_relaxed(uint32_t b, void *addr) { *(volatile uint32_t *)addr = b; } #undef writeq static inline void writeq(uint64_t b, void *addr) { - *(volatile uint64_t *)addr = b; + *(volatile uint64_t *)addr = b; } #undef writeb static inline void writeb(uint8_t b, void *addr) { - *(volatile uint8_t *)addr = b; + *(volatile uint8_t *)addr = b; } #undef writew static inline void writew(uint16_t b, void *addr) { - *(volatile uint16_t *)addr = b; + *(volatile uint16_t *)addr = b; } #undef ioread8 static inline uint8_t ioread8(const volatile void *addr) { return *(const volatile uint8_t *)addr; } #undef ioread16 static inline uint16_t ioread16(const volatile void *addr) { return *(const volatile uint16_t *)addr; } #undef ioread16be static inline uint16_t ioread16be(const volatile void *addr) { return be16toh(*(const volatile uint16_t *)addr); } #undef ioread32 static inline uint32_t ioread32(const volatile void *addr) { return *(const volatile uint32_t *)addr; } #undef ioread32be static inline uint32_t ioread32be(const volatile void *addr) { return be32toh(*(const volatile uint32_t *)addr); } #undef iowrite8 static inline void iowrite8(uint8_t v, volatile void *addr) { *(volatile uint8_t *)addr = v; } #undef iowrite16 static inline void iowrite16(uint16_t v, volatile void *addr) { *(volatile uint16_t *)addr = v; } #undef iowrite32 static inline void iowrite32(uint32_t v, volatile void *addr) { *(volatile uint32_t *)addr = v; } #undef iowrite32be static inline void iowrite32be(uint32_t v, volatile void *addr) { *(volatile uint32_t *)addr = htobe32(v); } #undef readb static inline uint8_t readb(const volatile void *addr) { return *(const volatile uint8_t *)addr; } #undef readw static inline uint16_t readw(const volatile void *addr) { return *(const volatile uint16_t *)addr; } #undef readl static inline uint32_t readl(const volatile void *addr) { return *(const volatile uint32_t *)addr; } #if defined(__i386__) || defined(__amd64__) static inline void _outb(u_char data, u_int port) { __asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port)); } #endif #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr); #else #define _ioremap_attr(...) 
NULL #endif #define ioremap_nocache(addr, size) \ _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE) #define ioremap_wc(addr, size) \ _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING) #define ioremap_wb(addr, size) \ _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK) #define ioremap_wt(addr, size) \ _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH) #define ioremap(addr, size) \ _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE) void iounmap(void *addr); #define memset_io(a, b, c) memset((a), (b), (c)) #define memcpy_fromio(a, b, c) memcpy((a), (b), (c)) #define memcpy_toio(a, b, c) memcpy((a), (b), (c)) static inline void __iowrite32_copy(void *to, void *from, size_t count) { uint32_t *src; uint32_t *dst; int i; for (i = 0, src = from, dst = to; i < count; i++, src++, dst++) __raw_writel(*src, dst); } static inline void __iowrite64_copy(void *to, void *from, size_t count) { #ifdef __LP64__ uint64_t *src; uint64_t *dst; int i; for (i = 0, src = from, dst = to; i < count; i++, src++, dst++) __raw_writeq(*src, dst); #else __iowrite32_copy(to, from, count * 2); #endif } enum { MEMREMAP_WB = 1 << 0, MEMREMAP_WT = 1 << 1, MEMREMAP_WC = 1 << 2, }; static inline void * memremap(resource_size_t offset, size_t size, unsigned long flags) { void *addr = NULL; if ((flags & MEMREMAP_WB) && (addr = ioremap_wb(offset, size)) != NULL) goto done; if ((flags & MEMREMAP_WT) && (addr = ioremap_wt(offset, size)) != NULL) goto done; if ((flags & MEMREMAP_WC) && (addr = ioremap_wc(offset, size)) != NULL) goto done; done: return (addr); } static inline void memunmap(void *addr) { /* XXX May need to check if this is RAM */ iounmap(addr); } #endif /* _LINUX_IO_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/jiffies.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/jiffies.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/jiffies.h (revision 331433) @@ -1,153 +1,153 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
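memremap() above tries the requested attributes in a fixed write-back, write-through, write-combining order and returns NULL if no mapping succeeds (or if no flag is set). A usage sketch, where phys and len are hypothetical:

	void *va;

	va = memremap(phys, len, MEMREMAP_WB | MEMREMAP_WC);
	if (va == NULL)
		return (-ENXIO);
	/* ... access the mapping ... */
	memunmap(va);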
* * $FreeBSD$ */ #ifndef _LINUX_JIFFIES_H_ #define _LINUX_JIFFIES_H_ #include #include #include #include #include -#define jiffies ticks +#define jiffies ticks #define jiffies_64 ticks #define jiffies_to_msecs(x) (((int64_t)(int)(x)) * 1000 / hz) #define MAX_JIFFY_OFFSET ((INT_MAX >> 1) - 1) #define time_after(a, b) ((int)((b) - (a)) < 0) #define time_before(a, b) time_after(b,a) #define time_after_eq(a, b) ((int)((a) - (b)) >= 0) #define time_before_eq(a, b) time_after_eq(b, a) #define time_in_range(a,b,c) \ (time_after_eq(a,b) && time_before_eq(a,c)) #define time_is_after_eq_jiffies(a) time_after_eq(a, jiffies) #define HZ hz static inline int timespec_to_jiffies(const struct timespec *ts) { u64 result; result = ((u64)hz * ts->tv_sec) + (((u64)hz * ts->tv_nsec + NSEC_PER_SEC - 1) / NSEC_PER_SEC); if (result > MAX_JIFFY_OFFSET) result = MAX_JIFFY_OFFSET; return ((int)result); } static inline int msecs_to_jiffies(uint64_t msec) { uint64_t msec_max, result; msec_max = -1ULL / (uint64_t)hz; if (msec > msec_max) msec = msec_max; result = howmany(msec * (uint64_t)hz, 1000ULL); if (result > MAX_JIFFY_OFFSET) result = MAX_JIFFY_OFFSET; return ((int)result); } static inline int usecs_to_jiffies(uint64_t usec) { uint64_t usec_max, result; usec_max = -1ULL / (uint64_t)hz; if (usec > usec_max) usec = usec_max; result = howmany(usec * (uint64_t)hz, 1000000ULL); if (result > MAX_JIFFY_OFFSET) result = MAX_JIFFY_OFFSET; return ((int)result); } static inline uint64_t nsecs_to_jiffies64(uint64_t nsec) { uint64_t nsec_max, result; nsec_max = -1ULL / (uint64_t)hz; if (nsec > nsec_max) nsec = nsec_max; result = howmany(nsec * (uint64_t)hz, 1000000000ULL); if (result > MAX_JIFFY_OFFSET) result = MAX_JIFFY_OFFSET; return (result); } static inline uint64_t nsecs_to_jiffies(uint64_t n) { return (usecs_to_jiffies(howmany(n, 1000ULL))); } static inline uint64_t jiffies_to_nsecs(int j) { return ((1000000000ULL / hz) * (uint64_t)(unsigned int)j); } static inline uint64_t jiffies_to_usecs(int j) { return ((1000000ULL / hz) * (uint64_t)(unsigned int)j); } static inline uint64_t get_jiffies_64(void) { return ((uint64_t)(unsigned int)ticks); } static inline int linux_timer_jiffies_until(int expires) { int delta = expires - jiffies; /* guard against already expired values */ if (delta < 1) delta = 1; return (delta); } #endif /* _LINUX_JIFFIES_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/kernel.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/kernel.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/kernel.h (revision 331433) @@ -1,460 +1,460 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * Copyright (c) 2014-2015 François Tigeot * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
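The time_after() family compares tick counts through a signed subtraction, so the result stays correct even after the ticks counter wraps, as long as the two stamps are less than INT_MAX ticks apart. A sketch of the intended idiom, where device_ready() is a placeholder:

	int deadline = jiffies + msecs_to_jiffies(100);

	while (!device_ready()) {
		if (time_after(jiffies, deadline))
			return (-ETIMEDOUT);
	}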
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_KERNEL_H_ #define _LINUX_KERNEL_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #define KERN_CONT "" #define KERN_EMERG "<0>" #define KERN_ALERT "<1>" #define KERN_CRIT "<2>" #define KERN_ERR "<3>" #define KERN_WARNING "<4>" #define KERN_NOTICE "<5>" #define KERN_INFO "<6>" #define KERN_DEBUG "<7>" #define U8_MAX ((u8)~0U) #define S8_MAX ((s8)(U8_MAX >> 1)) #define S8_MIN ((s8)(-S8_MAX - 1)) #define U16_MAX ((u16)~0U) #define S16_MAX ((s16)(U16_MAX >> 1)) #define S16_MIN ((s16)(-S16_MAX - 1)) #define U32_MAX ((u32)~0U) #define S32_MAX ((s32)(U32_MAX >> 1)) #define S32_MIN ((s32)(-S32_MAX - 1)) #define U64_MAX ((u64)~0ULL) #define S64_MAX ((s64)(U64_MAX >> 1)) #define S64_MIN ((s64)(-S64_MAX - 1)) #define S8_C(x) x #define U8_C(x) x ## U #define S16_C(x) x #define U16_C(x) x ## U #define S32_C(x) x #define U32_C(x) x ## U #define S64_C(x) x ## LL #define U64_C(x) x ## ULL #define BUILD_BUG() do { CTASSERT(0); } while (0) #define BUILD_BUG_ON(x) CTASSERT(!(x)) #define BUILD_BUG_ON_MSG(x, msg) BUILD_BUG_ON(x) #define BUILD_BUG_ON_NOT_POWER_OF_2(x) BUILD_BUG_ON(!powerof2(x)) #define BUILD_BUG_ON_INVALID(expr) while (0) { (void)(expr); } #define BUG() panic("BUG at %s:%d", __FILE__, __LINE__) #define BUG_ON(cond) do { \ if (cond) { \ panic("BUG ON %s failed at %s:%d", \ __stringify(cond), __FILE__, __LINE__); \ } \ } while (0) #define WARN_ON(cond) ({ \ bool __ret = (cond); \ if (__ret) { \ printf("WARNING %s failed at %s:%d\n", \ __stringify(cond), __FILE__, __LINE__); \ } \ unlikely(__ret); \ }) #define WARN_ON_SMP(cond) WARN_ON(cond) #define WARN_ON_ONCE(cond) ({ \ static bool __warn_on_once; \ bool __ret = (cond); \ if (__ret && !__warn_on_once) { \ __warn_on_once = 1; \ printf("WARNING %s failed at %s:%d\n", \ __stringify(cond), __FILE__, __LINE__); \ } \ unlikely(__ret); \ }) #define oops_in_progress SCHEDULER_STOPPED() #undef ALIGN #define ALIGN(x, y) roundup2((x), (y)) #undef PTR_ALIGN #define PTR_ALIGN(p, a) ((__typeof(p))ALIGN((uintptr_t)(p), (a))) #define DIV_ROUND_UP(x, n) howmany(x, n) #define DIV_ROUND_UP_ULL(x, n) DIV_ROUND_UP((unsigned long long)(x), (n)) #define FIELD_SIZEOF(t, f) sizeof(((t *)0)->f) #define printk(...) printf(__VA_ARGS__) #define vprintk(f, a) vprintf(f, a) struct va_format { const char *fmt; va_list *va; }; static inline int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) { ssize_t ssize = size; int i; i = vsnprintf(buf, size, fmt, args); return ((i >= ssize) ? (ssize - 1) : i); } static inline int scnprintf(char *buf, size_t size, const char *fmt, ...) 
{ va_list args; int i; va_start(args, fmt); i = vscnprintf(buf, size, fmt, args); va_end(args); return (i); } /* * The "pr_debug()" and "pr_devel()" macros should produce zero code * unless DEBUG is defined: */ #ifdef DEBUG #define pr_debug(fmt, ...) \ - log(LOG_DEBUG, fmt, ##__VA_ARGS__) + log(LOG_DEBUG, fmt, ##__VA_ARGS__) #define pr_devel(fmt, ...) \ log(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug(fmt, ...) \ - ({ if (0) log(LOG_DEBUG, fmt, ##__VA_ARGS__); 0; }) + ({ if (0) log(LOG_DEBUG, fmt, ##__VA_ARGS__); 0; }) #define pr_devel(fmt, ...) \ ({ if (0) log(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__); 0; }) #endif #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif /* * Print a one-time message (analogous to WARN_ONCE() et al): */ #define printk_once(...) do { \ static bool __print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk(__VA_ARGS__); \ } \ } while (0) /* * Log a one-time message (analogous to WARN_ONCE() et al): */ #define log_once(level,...) do { \ static bool __log_once; \ \ if (unlikely(!__log_once)) { \ __log_once = true; \ log(level, __VA_ARGS__); \ } \ } while (0) #define pr_emerg(fmt, ...) \ log(LOG_EMERG, pr_fmt(fmt), ##__VA_ARGS__) #define pr_alert(fmt, ...) \ log(LOG_ALERT, pr_fmt(fmt), ##__VA_ARGS__) #define pr_crit(fmt, ...) \ log(LOG_CRIT, pr_fmt(fmt), ##__VA_ARGS__) #define pr_err(fmt, ...) \ log(LOG_ERR, pr_fmt(fmt), ##__VA_ARGS__) #define pr_warning(fmt, ...) \ log(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__) #define pr_warn(...) \ pr_warning(__VA_ARGS__) #define pr_warn_once(fmt, ...) \ log_once(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__) #define pr_notice(fmt, ...) \ log(LOG_NOTICE, pr_fmt(fmt), ##__VA_ARGS__) #define pr_info(fmt, ...) \ log(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__) #define pr_info_once(fmt, ...) \ log_once(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__) #define pr_cont(fmt, ...) \ printk(KERN_CONT fmt, ##__VA_ARGS__) #define pr_warn_ratelimited(...) do { \ static linux_ratelimit_t __ratelimited; \ if (linux_ratelimited(&__ratelimited)) \ pr_warning(__VA_ARGS__); \ } while (0) #ifndef WARN #define WARN(condition, ...) ({ \ - bool __ret_warn_on = (condition); \ - if (unlikely(__ret_warn_on)) \ - pr_warning(__VA_ARGS__); \ - unlikely(__ret_warn_on); \ + bool __ret_warn_on = (condition); \ + if (unlikely(__ret_warn_on)) \ + pr_warning(__VA_ARGS__); \ + unlikely(__ret_warn_on); \ }) #endif #ifndef WARN_ONCE #define WARN_ONCE(condition, ...) 
({ \ - bool __ret_warn_on = (condition); \ - if (unlikely(__ret_warn_on)) \ - pr_warn_once(__VA_ARGS__); \ - unlikely(__ret_warn_on); \ + bool __ret_warn_on = (condition); \ + if (unlikely(__ret_warn_on)) \ + pr_warn_once(__VA_ARGS__); \ + unlikely(__ret_warn_on); \ }) #endif #define container_of(ptr, type, member) \ ({ \ const __typeof(((type *)0)->member) *__p = (ptr); \ (type *)((uintptr_t)__p - offsetof(type, member)); \ }) - + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define u64_to_user_ptr(val) ((void *)(uintptr_t)(val)) static inline unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) { return (strtouq(cp, endp, base)); } static inline long long simple_strtoll(const char *cp, char **endp, unsigned int base) { return (strtoq(cp, endp, base)); } static inline unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) { return (strtoul(cp, endp, base)); } static inline long simple_strtol(const char *cp, char **endp, unsigned int base) { return (strtol(cp, endp, base)); } static inline int kstrtoul(const char *cp, unsigned int base, unsigned long *res) { char *end; *res = strtoul(cp, &end, base); /* skip newline character, if any */ if (*end == '\n') end++; if (*cp == 0 || *end != 0) return (-EINVAL); return (0); } static inline int kstrtol(const char *cp, unsigned int base, long *res) { char *end; *res = strtol(cp, &end, base); /* skip newline character, if any */ if (*end == '\n') end++; if (*cp == 0 || *end != 0) return (-EINVAL); return (0); } static inline int kstrtoint(const char *cp, unsigned int base, int *res) { char *end; long temp; *res = temp = strtol(cp, &end, base); /* skip newline character, if any */ if (*end == '\n') end++; if (*cp == 0 || *end != 0) return (-EINVAL); if (temp != (int)temp) return (-ERANGE); return (0); } static inline int kstrtouint(const char *cp, unsigned int base, unsigned int *res) { char *end; unsigned long temp; *res = temp = strtoul(cp, &end, base); /* skip newline character, if any */ if (*end == '\n') end++; if (*cp == 0 || *end != 0) return (-EINVAL); if (temp != (unsigned int)temp) return (-ERANGE); return (0); } static inline int kstrtou32(const char *cp, unsigned int base, u32 *res) { char *end; unsigned long temp; *res = temp = strtoul(cp, &end, base); /* skip newline character, if any */ if (*end == '\n') end++; if (*cp == 0 || *end != 0) return (-EINVAL); if (temp != (u32)temp) return (-ERANGE); return (0); } #define min(x, y) ((x) < (y) ? (x) : (y)) #define max(x, y) ((x) > (y) ? (x) : (y)) #define min3(a, b, c) min(a, min(b,c)) #define max3(a, b, c) max(a, max(b,c)) #define min_t(type, x, y) ({ \ type __min1 = (x); \ type __min2 = (y); \ __min1 < __min2 ? __min1 : __min2; }) #define max_t(type, x, y) ({ \ type __max1 = (x); \ type __max2 = (y); \ __max1 > __max2 ? __max1 : __max2; }) #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) #define clamp(x, lo, hi) min( max(x,lo), hi) #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) /* * This looks more complex than it should be. But we need to * get the type for the ~ right in round_down (it needs to be * as wide as the result!), and we want to evaluate the macro * arguments just once each. 
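*
* A worked example with a power-of-two y (the macros assume one):
*   round_up(13, 8)   -> ((13 - 1) | 7) + 1 = (12 | 7) + 1 = 16
*   round_down(13, 8) ->  13 & ~7           = 8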
*/ #define __round_mask(x, y) ((__typeof__(x))((y)-1)) #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) #define round_down(x, y) ((x) & ~__round_mask(x, y)) #define smp_processor_id() PCPU_GET(cpuid) #define num_possible_cpus() mp_ncpus #define num_online_cpus() mp_ncpus #if defined(__i386__) || defined(__amd64__) extern bool linux_cpu_has_clflush; #define cpu_has_clflush linux_cpu_has_clflush #endif typedef struct pm_message { - int event; + int event; } pm_message_t; /* Swap values of a and b */ #define swap(a, b) do { \ typeof(a) _swap_tmp = a; \ a = b; \ b = _swap_tmp; \ } while (0) #define DIV_ROUND_CLOSEST(x, divisor) (((x) + ((divisor) / 2)) / (divisor)) #define DIV_ROUND_CLOSEST_ULL(x, divisor) ({ \ __typeof(divisor) __d = (divisor); \ unsigned long long __ret = (x) + (__d) / 2; \ __ret /= __d; \ __ret; \ }) static inline uintmax_t mult_frac(uintmax_t x, uintmax_t multiplier, uintmax_t divisor) { uintmax_t q = (x / divisor); uintmax_t r = (x % divisor); return ((q * multiplier) + ((r * multiplier) / divisor)); } static inline int64_t abs64(int64_t x) { return (x < 0 ? -x : x); } typedef struct linux_ratelimit { struct timeval lasttime; int counter; } linux_ratelimit_t; static inline bool linux_ratelimited(linux_ratelimit_t *rl) { return (ppsratecheck(&rl->lasttime, &rl->counter, 1)); } #endif /* _LINUX_KERNEL_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/kmod.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/kmod.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/kmod.h (revision 331433) @@ -1,52 +1,52 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_KMOD_H_ #define _LINUX_KMOD_H_ #include #include #include #include #include #include #define request_module(...) 
\ ({\ char modname[128]; \ - int fileid; \ + int fileid; \ snprintf(modname, sizeof(modname), __VA_ARGS__); \ kern_kldload(curthread, modname, &fileid); \ }) #define request_module_nowait request_module #endif /* _LINUX_KMOD_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/kobject.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/kobject.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/kobject.h (revision 331433) @@ -1,154 +1,154 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
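Note that request_module() is a statement expression whose value is the kern_kldload() result, i.e. 0 or a positive FreeBSD errno, not the 0/-errno convention Linux callers may expect. An illustrative call (the module name is made up):

	if (request_module("lkpi_dummy") != 0)
		pr_warn("lkpi_dummy did not load\n");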
* * $FreeBSD$ */ #ifndef _LINUX_KOBJECT_H_ #define _LINUX_KOBJECT_H_ #include #include #include #include #include struct kobject; struct sysctl_oid; struct kobj_type { void (*release)(struct kobject *kobj); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; }; extern const struct kobj_type linux_kfree_type; struct kobject { struct kobject *parent; char *name; struct kref kref; const struct kobj_type *ktype; struct list_head entry; struct sysctl_oid *oidp; }; extern struct kobject *mm_kobj; struct attribute { - const char *name; + const char *name; struct module *owner; mode_t mode; }; struct kobj_attribute { - struct attribute attr; - ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, - char *buf); - ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, - const char *buf, size_t count); + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, + char *buf); + ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count); }; static inline void kobject_init(struct kobject *kobj, const struct kobj_type *ktype) { kref_init(&kobj->kref); INIT_LIST_HEAD(&kobj->entry); kobj->ktype = ktype; kobj->oidp = NULL; } void linux_kobject_release(struct kref *kref); static inline void kobject_put(struct kobject *kobj) { if (kobj) kref_put(&kobj->kref, linux_kobject_release); } static inline struct kobject * kobject_get(struct kobject *kobj) { if (kobj) kref_get(&kobj->kref); return kobj; } int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list); int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...); static inline struct kobject * kobject_create(void) { struct kobject *kobj; kobj = kzalloc(sizeof(*kobj), GFP_KERNEL); if (kobj == NULL) return (NULL); kobject_init(kobj, &linux_kfree_type); return (kobj); } static inline struct kobject * kobject_create_and_add(const char *name, struct kobject *parent) { struct kobject *kobj; kobj = kobject_create(); if (kobj == NULL) return (NULL); if (kobject_add(kobj, parent, "%s", name) == 0) return (kobj); kobject_put(kobj); return (NULL); } static inline void kobject_del(struct kobject *kobj __unused) { } static inline char * kobject_name(const struct kobject *kobj) { return kobj->name; } int kobject_set_name(struct kobject *kobj, const char *fmt, ...); int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...); #endif /* _LINUX_KOBJECT_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/ktime.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/ktime.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/ktime.h (revision 331433) @@ -1,210 +1,210 @@ /*- * Copyright (c) 2018 Limelight Networks, Inc. * Copyright (c) 2014-2018 Mellanox Technologies, Ltd. * Copyright (c) 2015 François Tigeot * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
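kobject_create_and_add() above combines allocation, initialization against linux_kfree_type, and kobject_add(), and it drops its own reference on failure, so the caller only ever releases with kobject_put(). A lifecycle sketch (the "stats" name and NULL parent are illustrative):

	struct kobject *dir;

	dir = kobject_create_and_add("stats", NULL);
	if (dir == NULL)
		return (-ENOMEM);
	/* ... publish attributes ... */
	kobject_put(dir);	/* final put frees via linux_kfree_type */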
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_KTIME_H #define _LINUX_KTIME_H #include #include #include #define ktime_get_ts(x) getnanouptime(x) /* time values in nanoseconds */ typedef s64 ktime_t; -#define KTIME_MAX ((s64)~((u64)1 << 63)) -#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +#define KTIME_MAX ((s64)~((u64)1 << 63)) +#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) static inline int64_t ktime_to_ns(ktime_t kt) { return (kt); } static inline ktime_t ns_to_ktime(uint64_t nsec) { return (nsec); } static inline int64_t ktime_divns(const ktime_t kt, int64_t div) { return (kt / div); } static inline int64_t ktime_to_us(ktime_t kt) { return (ktime_divns(kt, NSEC_PER_USEC)); } static inline int64_t ktime_to_ms(ktime_t kt) { return (ktime_divns(kt, NSEC_PER_MSEC)); } static inline struct timeval ktime_to_timeval(ktime_t kt) { return (ns_to_timeval(kt)); } static inline ktime_t ktime_add_ns(ktime_t kt, int64_t ns) { return (kt + ns); } static inline ktime_t ktime_sub_ns(ktime_t kt, int64_t ns) { return (kt - ns); } static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) { ktime_t retval = {(s64) secs * NSEC_PER_SEC + (s64) nsecs}; return (retval); } static inline ktime_t ktime_sub(ktime_t lhs, ktime_t rhs) { return (lhs - rhs); } static inline int64_t ktime_us_delta(ktime_t later, ktime_t earlier) { ktime_t diff = ktime_sub(later, earlier); return (ktime_to_us(diff)); } static inline int64_t ktime_ms_delta(ktime_t later, ktime_t earlier) { ktime_t diff = ktime_sub(later, earlier); return (ktime_to_ms(diff)); } static inline ktime_t ktime_add(ktime_t lhs, ktime_t rhs) { return (lhs + rhs); } static inline ktime_t timespec_to_ktime(struct timespec ts) { return (ktime_set(ts.tv_sec, ts.tv_nsec)); } static inline ktime_t timeval_to_ktime(struct timeval tv) { return (ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC)); } #define ktime_to_timespec(kt) ns_to_timespec(kt) #define ktime_to_timeval(kt) ns_to_timeval(kt) #define ktime_to_ns(kt) (kt) static inline int64_t ktime_get_ns(void) { struct timespec ts; ktime_get_ts(&ts); return (ktime_to_ns(timespec_to_ktime(ts))); } static inline ktime_t ktime_get(void) { struct timespec ts; ktime_get_ts(&ts); return (timespec_to_ktime(ts)); } static inline ktime_t ktime_get_boottime(void) { struct timespec ts; nanouptime(&ts); return (timespec_to_ktime(ts)); } static inline ktime_t ktime_get_real(void) { struct timespec ts; nanotime(&ts); return (timespec_to_ktime(ts)); } static inline ktime_t ktime_get_real_seconds(void) { struct timespec ts; nanotime(&ts); return (ts.tv_sec); } static inline ktime_t ktime_get_raw(void) { struct timespec ts; nanotime(&ts); return (timespec_to_ktime(ts)); } static inline u64 ktime_get_raw_ns(void) { struct timespec ts; nanouptime(&ts); return (ktime_to_ns(timespec_to_ktime(ts))); } #endif /* 
_LINUX_KTIME_H */ Index: head/sys/compat/linuxkpi/common/include/linux/list.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/list.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/list.h (revision 331433) @@ -1,474 +1,474 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_LIST_H_ #define _LINUX_LIST_H_ /* * Since LIST_HEAD conflicts with the Linux definition, we must include any * FreeBSD header that requires it here, so that it is resolved with the correct * definition prior to the undef.
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef prefetch #define prefetch(x) #endif #define LINUX_LIST_HEAD_INIT(name) { &(name), &(name) } #define LINUX_LIST_HEAD(name) \ struct list_head name = LINUX_LIST_HEAD_INIT(name) #ifndef LIST_HEAD_DEF #define LIST_HEAD_DEF struct list_head { struct list_head *next; struct list_head *prev; }; #endif static inline void INIT_LIST_HEAD(struct list_head *list) { list->next = list->prev = list; } - + static inline int list_empty(const struct list_head *head) { return (head->next == head); } static inline int list_empty_careful(const struct list_head *head) { struct list_head *next = head->next; return ((next == head) && (next == head->prev)); } static inline void __list_del(struct list_head *prev, struct list_head *next) { next->prev = prev; WRITE_ONCE(prev->next, next); } static inline void __list_del_entry(struct list_head *entry) { __list_del(entry->prev, entry->next); } static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); } static inline void list_replace(struct list_head *old, struct list_head *new) { new->next = old->next; new->next->prev = new; new->prev = old->prev; new->prev->next = new; } static inline void list_replace_init(struct list_head *old, struct list_head *new) { list_replace(old, new); INIT_LIST_HEAD(old); } static inline void linux_list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { next->prev = new; new->next = next; new->prev = prev; prev->next = new; } static inline void list_del_init(struct list_head *entry) -{ +{ list_del(entry); INIT_LIST_HEAD(entry); } #define list_entry(ptr, type, field) container_of(ptr, type, field) -#define list_first_entry(ptr, type, member) \ - list_entry((ptr)->next, type, member) +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) #define list_last_entry(ptr, type, member) \ list_entry((ptr)->prev, type, member) #define list_first_entry_or_null(ptr, type, member) \ (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) #define list_next_entry(ptr, member) \ list_entry(((ptr)->member.next), typeof(*(ptr)), member) #define list_safe_reset_next(ptr, n, member) \ (n) = list_next_entry(ptr, member) #define list_prev_entry(ptr, member) \ list_entry(((ptr)->member.prev), typeof(*(ptr)), member) #define list_for_each(p, head) \ for (p = (head)->next; p != (head); p = (p)->next) #define list_for_each_safe(p, n, head) \ for (p = (head)->next, n = (p)->next; p != (head); p = n, n = (p)->next) #define list_for_each_entry(p, h, field) \ for (p = list_entry((h)->next, typeof(*p), field); &(p)->field != (h); \ p = list_entry((p)->field.next, typeof(*p), field)) #define list_for_each_entry_safe(p, n, h, field) \ - for (p = list_entry((h)->next, typeof(*p), field), \ + for (p = list_entry((h)->next, typeof(*p), field), \ n = list_entry((p)->field.next, typeof(*p), field); &(p)->field != (h);\ p = n, n = list_entry(n->field.next, typeof(*n), field)) #define list_for_each_entry_from(p, h, field) \ for ( ; &(p)->field != (h); \ p = list_entry((p)->field.next, typeof(*p), field)) #define list_for_each_entry_continue(p, h, field) \ for (p = list_next_entry((p), field); &(p)->field != (h); \ p = list_next_entry((p), field)) -#define list_for_each_entry_safe_from(pos, n, head, member) \ +#define list_for_each_entry_safe_from(pos, n, head, member) \ for (n = list_entry((pos)->member.next, typeof(*pos), member); \ &(pos)->member != (head); \ pos = n, n = list_entry(n->member.next, typeof(*n), member)) #define list_for_each_entry_reverse(p, h, field) \ for (p = list_entry((h)->prev, typeof(*p), field); &(p)->field != (h); \ p = list_entry((p)->field.prev, typeof(*p), field)) #define list_for_each_entry_safe_reverse(p, n, h, field) \ - for (p = list_entry((h)->prev, typeof(*p), field), \ + for (p = list_entry((h)->prev, typeof(*p), field), \ n = list_entry((p)->field.prev, typeof(*p), field); &(p)->field != (h); \ p = n, n = list_entry(n->field.prev, typeof(*n), field)) #define list_for_each_entry_continue_reverse(p, h, field) \ for (p = list_entry((p)->field.prev, typeof(*p), field); &(p)->field != (h); \ p = list_entry((p)->field.prev, typeof(*p), field)) #define list_for_each_prev(p, h) for (p = (h)->prev; p != (h); p = (p)->prev) static inline void list_add(struct list_head *new, struct list_head *head) { linux_list_add(new, head, head->next); } static inline void list_add_tail(struct list_head *new, struct list_head *head) { linux_list_add(new, head->prev, head); } static inline void list_move(struct list_head *list, struct list_head *head) { list_del(list); list_add(list, head); } static inline void list_move_tail(struct list_head *entry, struct list_head *head) { list_del(entry); list_add_tail(entry, head); } static inline void -linux_list_splice(const struct list_head *list, struct list_head *prev, +linux_list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next) { struct list_head *first; struct list_head *last; if (list_empty(list)) return; first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; } static inline void list_splice(const struct list_head *list, struct list_head *head) { linux_list_splice(list, head, head->next); -} +} static inline void list_splice_tail(struct list_head *list, struct list_head *head) { linux_list_splice(list, head->prev, head); } - + static inline void list_splice_init(struct list_head *list, struct list_head *head) { linux_list_splice(list, head, 
head->next); - INIT_LIST_HEAD(list); + INIT_LIST_HEAD(list); } - + static inline void list_splice_tail_init(struct list_head *list, struct list_head *head) { linux_list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } #undef LIST_HEAD #define LIST_HEAD(name) struct list_head name = { &(name), &(name) } struct hlist_head { struct hlist_node *first; }; struct hlist_node { struct hlist_node *next, **pprev; }; #define HLIST_HEAD_INIT { } #define HLIST_HEAD(name) struct hlist_head name = HLIST_HEAD_INIT #define INIT_HLIST_HEAD(head) (head)->first = NULL #define INIT_HLIST_NODE(node) \ do { \ (node)->next = NULL; \ (node)->pprev = NULL; \ } while (0) static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } static inline int hlist_empty(const struct hlist_head *h) { return !h->first; } static inline void hlist_del(struct hlist_node *n) { - if (n->next) - n->next->pprev = n->pprev; - *n->pprev = n->next; + if (n->next) + n->next->pprev = n->pprev; + *n->pprev = n->next; } static inline void hlist_del_init(struct hlist_node *n) { if (hlist_unhashed(n)) return; hlist_del(n); INIT_HLIST_NODE(n); } static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { n->next = h->first; if (h->first) h->first->pprev = &n->next; h->first = n; n->pprev = &h->first; } static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { n->pprev = next->pprev; n->next = next; next->pprev = &n->next; *(n->pprev) = n; } - + static inline void hlist_add_after(struct hlist_node *n, struct hlist_node *next) { next->next = n->next; n->next = next; next->pprev = &n->next; if (next->next) next->next->pprev = &next->next; } - + static inline void hlist_move_list(struct hlist_head *old, struct hlist_head *new) { new->first = old->first; if (new->first) new->first->pprev = &new->first; old->first = NULL; } static inline int list_is_singular(const struct list_head *head) { return !list_empty(head) && (head->next == head->prev); } static inline void __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { struct list_head *new_first = entry->next; list->next = head->next; list->next->prev = list; list->prev = entry; entry->next = list; head->next = new_first; new_first->prev = head; } static inline void list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { if (list_empty(head)) return; if (list_is_singular(head) && (head->next != entry && head != entry)) return; if (entry == head) INIT_LIST_HEAD(list); else __list_cut_position(list, head, entry); } static inline int list_is_last(const struct list_head *list, - const struct list_head *head) + const struct list_head *head) { - return list->next == head; + return list->next == head; } - + #define hlist_entry(ptr, type, field) container_of(ptr, type, field) #define hlist_for_each(p, head) \ for (p = (head)->first; p; p = (p)->next) #define hlist_for_each_safe(p, n, head) \ for (p = (head)->first; p && ({ n = (p)->next; 1; }); p = n) #define hlist_entry_safe(ptr, type, member) \ ((ptr) ? 
hlist_entry(ptr, type, member) : NULL) #define hlist_for_each_entry(pos, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) #define hlist_for_each_entry_continue(pos, member) \ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member); \ (pos); \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) #define hlist_for_each_entry_from(pos, member) \ for (; (pos); \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) #define hlist_for_each_entry_safe(pos, n, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ (pos) && ({ n = (pos)->member.next; 1; }); \ pos = hlist_entry_safe(n, typeof(*(pos)), member)) extern void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)); #endif /* _LINUX_LIST_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/log2.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/log2.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/log2.h (revision 331433) @@ -1,131 +1,131 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2015 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_LOG2_H_ #define _LINUX_LOG2_H_ #include #include static inline unsigned long roundup_pow_of_two(unsigned long x) { return (1UL << flsl(x - 1)); } static inline int is_power_of_2(unsigned long n) { return (n == roundup_pow_of_two(n)); } static inline unsigned long rounddown_pow_of_two(unsigned long x) { - return (1UL << (flsl(x) - 1)); + return (1UL << (flsl(x) - 1)); } #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ (n) < 1 ? -1 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ (n) & (1ULL << 60) ? 60 : \ (n) & (1ULL << 59) ? 59 : \ (n) & (1ULL << 58) ? 58 : \ (n) & (1ULL << 57) ? 57 : \ (n) & (1ULL << 56) ? 56 : \ (n) & (1ULL << 55) ? 55 : \ (n) & (1ULL << 54) ? 54 : \ (n) & (1ULL << 53) ? 53 : \ (n) & (1ULL << 52) ? 52 : \ (n) & (1ULL << 51) ? 51 : \ (n) & (1ULL << 50) ? 
50 : \ (n) & (1ULL << 49) ? 49 : \ (n) & (1ULL << 48) ? 48 : \ (n) & (1ULL << 47) ? 47 : \ (n) & (1ULL << 46) ? 46 : \ (n) & (1ULL << 45) ? 45 : \ (n) & (1ULL << 44) ? 44 : \ (n) & (1ULL << 43) ? 43 : \ (n) & (1ULL << 42) ? 42 : \ (n) & (1ULL << 41) ? 41 : \ (n) & (1ULL << 40) ? 40 : \ (n) & (1ULL << 39) ? 39 : \ (n) & (1ULL << 38) ? 38 : \ (n) & (1ULL << 37) ? 37 : \ (n) & (1ULL << 36) ? 36 : \ (n) & (1ULL << 35) ? 35 : \ (n) & (1ULL << 34) ? 34 : \ (n) & (1ULL << 33) ? 33 : \ (n) & (1ULL << 32) ? 32 : \ (n) & (1ULL << 31) ? 31 : \ (n) & (1ULL << 30) ? 30 : \ (n) & (1ULL << 29) ? 29 : \ (n) & (1ULL << 28) ? 28 : \ (n) & (1ULL << 27) ? 27 : \ (n) & (1ULL << 26) ? 26 : \ (n) & (1ULL << 25) ? 25 : \ (n) & (1ULL << 24) ? 24 : \ (n) & (1ULL << 23) ? 23 : \ (n) & (1ULL << 22) ? 22 : \ (n) & (1ULL << 21) ? 21 : \ (n) & (1ULL << 20) ? 20 : \ (n) & (1ULL << 19) ? 19 : \ (n) & (1ULL << 18) ? 18 : \ (n) & (1ULL << 17) ? 17 : \ (n) & (1ULL << 16) ? 16 : \ (n) & (1ULL << 15) ? 15 : \ (n) & (1ULL << 14) ? 14 : \ (n) & (1ULL << 13) ? 13 : \ (n) & (1ULL << 12) ? 12 : \ (n) & (1ULL << 11) ? 11 : \ (n) & (1ULL << 10) ? 10 : \ (n) & (1ULL << 9) ? 9 : \ (n) & (1ULL << 8) ? 8 : \ (n) & (1ULL << 7) ? 7 : \ (n) & (1ULL << 6) ? 6 : \ (n) & (1ULL << 5) ? 5 : \ (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ (n) & (1ULL << 1) ? 1 : \ (n) & (1ULL << 0) ? 0 : \ -1) : \ (sizeof(n) <= 4) ? \ fls((u32)(n)) - 1 : flsll((u64)(n)) - 1 \ ) #define order_base_2(x) ilog2(roundup_pow_of_two(x)) #endif /* _LINUX_LOG2_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/miscdevice.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/miscdevice.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/miscdevice.h (revision 331433) @@ -1,76 +1,76 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
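A minimal usage sketch of the log2.h helpers above (the function name and numbers are hypothetical, not part of the header); it only illustrates that roundup_pow_of_two() is fls-based and that ilog2() constant-folds for compile-time arguments:

	#include <linux/log2.h>

	/* Hypothetical helper: derive a power-of-two hash size and its shift. */
	static inline int
	example_hash_shift(unsigned long nentries)
	{
		/* e.g. nentries == 100: flsl(99) == 7, so this yields 128. */
		if (!is_power_of_2(nentries))
			nentries = roundup_pow_of_two(nentries);

		/*
		 * ilog2(128) == 7; with a constant argument the conditional
		 * chain above folds away, otherwise fls()/flsll() is used.
		 */
		return (ilog2(nentries));
	}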
* * $FreeBSD$ */ #ifndef _LINUX_MISCDEVICE_H_ #define _LINUX_MISCDEVICE_H_ #define MISC_DYNAMIC_MINOR -1 #include #include struct miscdevice { const char *name; struct device *this_device; const struct file_operations *fops; struct cdev *cdev; int minor; const char *nodename; umode_t mode; }; extern struct class linux_class_misc; static inline int misc_register(struct miscdevice *misc) { misc->this_device = device_create(&linux_class_misc, &linux_root_device, 0, misc, misc->name); misc->cdev = cdev_alloc(); if (misc->cdev == NULL) return -ENOMEM; misc->cdev->owner = THIS_MODULE; misc->cdev->ops = misc->fops; kobject_set_name(&misc->cdev->kobj, misc->name); - if (cdev_add(misc->cdev, misc->this_device->devt, 1)) + if (cdev_add(misc->cdev, misc->this_device->devt, 1)) return -EINVAL; return (0); } static inline int misc_deregister(struct miscdevice *misc) { device_destroy(&linux_class_misc, misc->this_device->devt); cdev_del(misc->cdev); return (0); } #endif /* _LINUX_MISCDEVICE_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/mutex.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/mutex.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/mutex.h (revision 331433) @@ -1,148 +1,148 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_MUTEX_H_ #define _LINUX_MUTEX_H_ #include #include #include #include #include typedef struct mutex { struct sx sx; } mutex_t; /* * By defining CONFIG_NO_MUTEX_SKIP LinuxKPI mutexes and asserts will * not be skipped during panic(). */ #ifdef CONFIG_NO_MUTEX_SKIP #define MUTEX_SKIP(void) 0 #else #define MUTEX_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active) #endif #define mutex_lock(_m) do { \ if (MUTEX_SKIP()) \ break; \ sx_xlock(&(_m)->sx); \ } while (0) #define mutex_lock_nested(_m, _s) mutex_lock(_m) #define mutex_lock_nest_lock(_m, _s) mutex_lock(_m) #define mutex_lock_interruptible(_m) ({ \ MUTEX_SKIP() ? 
0 : \ linux_mutex_lock_interruptible(_m); \ }) #define mutex_unlock(_m) do { \ if (MUTEX_SKIP()) \ break; \ sx_xunlock(&(_m)->sx); \ } while (0) #define mutex_trylock(_m) ({ \ MUTEX_SKIP() ? 1 : \ !!sx_try_xlock(&(_m)->sx); \ }) enum mutex_trylock_recursive_enum { MUTEX_TRYLOCK_FAILED = 0, MUTEX_TRYLOCK_SUCCESS = 1, MUTEX_TRYLOCK_RECURSIVE = 2, }; static inline __must_check enum mutex_trylock_recursive_enum mutex_trylock_recursive(struct mutex *lock) { if (unlikely(sx_xholder(&lock->sx) == curthread)) return (MUTEX_TRYLOCK_RECURSIVE); return (mutex_trylock(lock)); } #define mutex_init(_m) \ linux_mutex_init(_m, mutex_name(#_m), SX_NOWITNESS) #define mutex_init_witness(_m) \ linux_mutex_init(_m, mutex_name(#_m), SX_DUPOK) #define mutex_destroy(_m) \ linux_mutex_destroy(_m) static inline bool mutex_is_locked(mutex_t *m) { return ((struct thread *)SX_OWNER(m->sx.sx_lock) != NULL); } static inline bool mutex_is_owned(mutex_t *m) { return (sx_xlocked(&m->sx)); } #ifdef WITNESS_ALL /* NOTE: the maximum WITNESS name is 64 chars */ #define __mutex_name(name, file, line) \ - (((const char *){file ":" #line "-" name}) + \ + (((const char *){file ":" #line "-" name}) + \ (sizeof(file) > 16 ? sizeof(file) - 16 : 0)) #else #define __mutex_name(name, file, line) name #endif #define _mutex_name(...) __mutex_name(__VA_ARGS__) #define mutex_name(name) _mutex_name(name, __FILE__, __LINE__) #define DEFINE_MUTEX(lock) \ mutex_t lock; \ SX_SYSINIT_FLAGS(lock, &(lock).sx, mutex_name(#lock), SX_DUPOK) static inline void linux_mutex_init(mutex_t *m, const char *name, int flags) { memset(m, 0, sizeof(*m)); sx_init_flags(&m->sx, name, flags); } static inline void linux_mutex_destroy(mutex_t *m) { if (mutex_is_owned(m)) mutex_unlock(m); sx_destroy(&m->sx); } extern int linux_mutex_lock_interruptible(mutex_t *m); #endif /* _LINUX_MUTEX_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/pci.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/pci.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/pci.h (revision 331433) @@ -1,848 +1,846 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
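A minimal usage sketch of the mutex shims above, assuming a hypothetical softc and field names; it shows how mutex_init(), mutex_trylock() and mutex_unlock() map onto sx(9) as defined above:

	#include <linux/errno.h>
	#include <linux/mutex.h>

	/* Hypothetical per-device state guarded by a LinuxKPI mutex. */
	struct example_softc {
		struct mutex lock;
		int opens;
	};

	static inline void
	example_attach(struct example_softc *sc)
	{
		/* Expands to linux_mutex_init() over an sx(9) lock. */
		mutex_init(&sc->lock);
	}

	static inline int
	example_open(struct example_softc *sc)
	{
		/* mutex_trylock() returns 1 on success, 0 on contention. */
		if (!mutex_trylock(&sc->lock))
			return (-EBUSY);
		sc->opens++;
		mutex_unlock(&sc->lock);
		return (0);
	}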
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_PCI_H_ #define _LINUX_PCI_H_ #define CONFIG_PCI_MSI #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct pci_device_id { uint32_t vendor; uint32_t device; uint32_t subvendor; uint32_t subdevice; uint32_t class; uint32_t class_mask; uintptr_t driver_data; }; #define MODULE_DEVICE_TABLE(bus, table) #define PCI_BASE_CLASS_DISPLAY 0x03 #define PCI_CLASS_DISPLAY_VGA 0x0300 #define PCI_CLASS_DISPLAY_OTHER 0x0380 #define PCI_BASE_CLASS_BRIDGE 0x06 #define PCI_CLASS_BRIDGE_ISA 0x0601 #define PCI_ANY_ID (-1) #define PCI_VENDOR_ID_APPLE 0x106b #define PCI_VENDOR_ID_ASUSTEK 0x1043 #define PCI_VENDOR_ID_ATI 0x1002 #define PCI_VENDOR_ID_DELL 0x1028 #define PCI_VENDOR_ID_HP 0x103c #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_VENDOR_ID_MELLANOX 0x15b3 #define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 #define PCI_VENDOR_ID_SONY 0x104d #define PCI_VENDOR_ID_TOPSPIN 0x1867 #define PCI_VENDOR_ID_VIA 0x1106 #define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4 #define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 #define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 #define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c #define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 #define PCI_SUBDEVICE_ID_QEMU 0x1100 #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) -#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) -#define PCI_FUNC(devfn) ((devfn) & 0x07) +#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) +#define PCI_FUNC(devfn) ((devfn) & 0x07) #define PCI_VDEVICE(_vendor, _device) \ .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID #define PCI_DEVICE(_vendor, _device) \ .vendor = (_vendor), .device = (_device), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID #define to_pci_dev(n) container_of(n, struct pci_dev, dev) #define PCI_VENDOR_ID PCIR_DEVVENDOR #define PCI_COMMAND PCIR_COMMAND #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL /* Device Control */ #define PCI_EXP_LNKCTL PCIER_LINK_CTL /* Link Control */ #define PCI_EXP_FLAGS_TYPE PCIEM_FLAGS_TYPE /* Device/Port type */ #define PCI_EXP_DEVCAP PCIER_DEVICE_CAP /* Device capabilities */ #define PCI_EXP_DEVSTA PCIER_DEVICE_STA /* Device Status */ #define PCI_EXP_LNKCAP PCIER_LINK_CAP /* Link Capabilities */ #define PCI_EXP_LNKSTA PCIER_LINK_STA /* Link Status */ #define PCI_EXP_SLTCAP PCIER_SLOT_CAP /* Slot Capabilities */ #define PCI_EXP_SLTCTL PCIER_SLOT_CTL /* Slot Control */ #define PCI_EXP_SLTSTA PCIER_SLOT_STA /* Slot Status */ #define PCI_EXP_RTCTL PCIER_ROOT_CTL /* Root Control */ #define PCI_EXP_RTCAP PCIER_ROOT_CAP /* Root Capabilities */ #define PCI_EXP_RTSTA PCIER_ROOT_STA /* Root Status */ #define PCI_EXP_DEVCAP2 PCIER_DEVICE_CAP2 /* Device Capabilities 2 */ #define PCI_EXP_DEVCTL2 
PCIER_DEVICE_CTL2 /* Device Control 2 */ #define PCI_EXP_LNKCAP2 PCIER_LINK_CAP2 /* Link Capabilities 2 */ #define PCI_EXP_LNKCTL2 PCIER_LINK_CTL2 /* Link Control 2 */ #define PCI_EXP_LNKSTA2 PCIER_LINK_STA2 /* Link Status 2 */ #define PCI_EXP_FLAGS PCIER_FLAGS /* Capabilities register */ #define PCI_EXP_FLAGS_VERS PCIEM_FLAGS_VERSION /* Capability version */ #define PCI_EXP_TYPE_ROOT_PORT PCIEM_TYPE_ROOT_PORT /* Root Port */ #define PCI_EXP_TYPE_ENDPOINT PCIEM_TYPE_ENDPOINT /* Express Endpoint */ #define PCI_EXP_TYPE_LEG_END PCIEM_TYPE_LEGACY_ENDPOINT /* Legacy Endpoint */ #define PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT /* Downstream Port */ #define PCI_EXP_FLAGS_SLOT PCIEM_FLAGS_SLOT /* Slot implemented */ #define PCI_EXP_TYPE_RC_EC PCIEM_TYPE_ROOT_EC /* Root Complex Event Collector */ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x01 /* Supported Link Speed 2.5GT/s */ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x02 /* Supported Link Speed 5.0GT/s */ #define PCI_EXP_LNKCAP_MLW 0x03f0 /* Maximum Link Width */ #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */ #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */ #define PCI_EXP_LNKCTL_HAWD PCIEM_LINK_CTL_HAWD #define PCI_EXP_LNKCAP_CLKPM 0x00040000 #define PCI_EXP_DEVSTA_TRPND 0x0020 #define IORESOURCE_MEM (1 << SYS_RES_MEMORY) #define IORESOURCE_IO (1 << SYS_RES_IOPORT) #define IORESOURCE_IRQ (1 << SYS_RES_IRQ) enum pci_bus_speed { PCI_SPEED_UNKNOWN = -1, PCIE_SPEED_2_5GT, PCIE_SPEED_5_0GT, PCIE_SPEED_8_0GT, }; enum pcie_link_width { PCIE_LNK_WIDTH_UNKNOWN = 0xFF, }; typedef int pci_power_t; #define PCI_D0 PCI_POWERSTATE_D0 #define PCI_D1 PCI_POWERSTATE_D1 #define PCI_D2 PCI_POWERSTATE_D2 #define PCI_D3hot PCI_POWERSTATE_D3 #define PCI_D3cold 4 #define PCI_POWER_ERROR PCI_POWERSTATE_UNKNOWN struct pci_dev; struct pci_driver { struct list_head links; char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); void (*remove)(struct pci_dev *dev); int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ int (*resume) (struct pci_dev *dev); /* Device woken up */ void (*shutdown) (struct pci_dev *dev); /* Device shutdown */ driver_t bsddriver; devclass_t bsdclass; struct device_driver driver; const struct pci_error_handlers *err_handler; bool isdrm; }; struct pci_bus { struct pci_dev *self; int number; }; extern struct list_head pci_drivers; extern struct list_head pci_devices; extern spinlock_t pci_lock; #define __devexit_p(x) x struct pci_dev { struct device dev; struct list_head links; struct pci_driver *pdrv; struct pci_bus *bus; uint64_t dma_mask; uint16_t device; uint16_t vendor; uint16_t subsystem_vendor; uint16_t subsystem_device; unsigned int irq; unsigned int devfn; uint32_t class; uint8_t revision; }; static inline struct resource_list_entry * linux_pci_get_rle(struct pci_dev *pdev, int type, int rid) { struct pci_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(pdev->dev.bsddev); rl = &dinfo->resources; return resource_list_find(rl, type, rid); } static inline struct resource_list_entry * linux_pci_get_bar(struct pci_dev *pdev, int bar) { struct resource_list_entry *rle; bar = PCIR_BAR(bar); if ((rle = linux_pci_get_rle(pdev, SYS_RES_MEMORY, bar)) == NULL) rle = linux_pci_get_rle(pdev, SYS_RES_IOPORT, bar); return (rle); } static inline struct device * linux_pci_find_irq_dev(unsigned int irq) { struct pci_dev *pdev; struct device 
*found; found = NULL; spin_lock(&pci_lock); list_for_each_entry(pdev, &pci_devices, links) { if (irq == pdev->dev.irq || (irq >= pdev->dev.msix && irq < pdev->dev.msix_max)) { found = &pdev->dev; break; } } spin_unlock(&pci_lock); return (found); } static inline unsigned long pci_resource_start(struct pci_dev *pdev, int bar) { struct resource_list_entry *rle; if ((rle = linux_pci_get_bar(pdev, bar)) == NULL) return (0); return rle->start; } static inline unsigned long pci_resource_len(struct pci_dev *pdev, int bar) { struct resource_list_entry *rle; if ((rle = linux_pci_get_bar(pdev, bar)) == NULL) return (0); return rle->count; } static inline int pci_resource_type(struct pci_dev *pdev, int bar) { struct pci_map *pm; pm = pci_find_bar(pdev->dev.bsddev, PCIR_BAR(bar)); if (!pm) return (-1); if (PCI_BAR_IO(pm->pm_value)) return (SYS_RES_IOPORT); else return (SYS_RES_MEMORY); } /* * All drivers just seem to want to inspect the type not flags. */ static inline int pci_resource_flags(struct pci_dev *pdev, int bar) { int type; type = pci_resource_type(pdev, bar); if (type < 0) return (0); return (1 << type); } static inline const char * pci_name(struct pci_dev *d) { return device_get_desc(d->dev.bsddev); } static inline void * pci_get_drvdata(struct pci_dev *pdev) { return dev_get_drvdata(&pdev->dev); } static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) { dev_set_drvdata(&pdev->dev, data); } static inline int pci_enable_device(struct pci_dev *pdev) { pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT); pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY); return (0); } static inline void pci_disable_device(struct pci_dev *pdev) { pci_disable_io(pdev->dev.bsddev, SYS_RES_IOPORT); pci_disable_io(pdev->dev.bsddev, SYS_RES_MEMORY); pci_disable_busmaster(pdev->dev.bsddev); } static inline int pci_set_master(struct pci_dev *pdev) { pci_enable_busmaster(pdev->dev.bsddev); return (0); } static inline int pci_set_power_state(struct pci_dev *pdev, int state) { pci_set_powerstate(pdev->dev.bsddev, state); return (0); } static inline int pci_clear_master(struct pci_dev *pdev) { pci_disable_busmaster(pdev->dev.bsddev); return (0); } static inline int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) { int rid; int type; type = pci_resource_type(pdev, bar); if (type < 0) return (-ENODEV); rid = PCIR_BAR(bar); if (bus_alloc_resource_any(pdev->dev.bsddev, type, &rid, RF_ACTIVE) == NULL) return (-EINVAL); return (0); } static inline void pci_release_region(struct pci_dev *pdev, int bar) { struct resource_list_entry *rle; if ((rle = linux_pci_get_bar(pdev, bar)) == NULL) return; bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res); } static inline void pci_release_regions(struct pci_dev *pdev) { int i; for (i = 0; i <= PCIR_MAX_BAR_0; i++) pci_release_region(pdev, i); } static inline int pci_request_regions(struct pci_dev *pdev, const char *res_name) { int error; int i; for (i = 0; i <= PCIR_MAX_BAR_0; i++) { error = pci_request_region(pdev, i, res_name); if (error && error != -ENODEV) { pci_release_regions(pdev); return (error); } } return (0); } static inline void pci_disable_msix(struct pci_dev *pdev) { pci_release_msi(pdev->dev.bsddev); /* * The MSIX IRQ numbers associated with this PCI device are no * longer valid and might be re-assigned. 
Make sure * linux_pci_find_irq_dev() no longer sees them by * resetting their references to zero: */ pdev->dev.msix = 0; pdev->dev.msix_max = 0; } static inline bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) { return (pci_resource_start(pdev, bar)); } #define PCI_CAP_ID_EXP PCIY_EXPRESS #define PCI_CAP_ID_PCIX PCIY_PCIX #define PCI_CAP_ID_AGP PCIY_AGP #define PCI_CAP_ID_PM PCIY_PMG #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL #define PCI_EXP_DEVCTL_PAYLOAD PCIEM_CTL_MAX_PAYLOAD #define PCI_EXP_DEVCTL_READRQ PCIEM_CTL_MAX_READ_REQUEST #define PCI_EXP_LNKCTL PCIER_LINK_CTL #define PCI_EXP_LNKSTA PCIER_LINK_STA static inline int pci_find_capability(struct pci_dev *pdev, int capid) { int reg; if (pci_find_cap(pdev->dev.bsddev, capid, &reg)) return (0); return (reg); } static inline int pci_pcie_cap(struct pci_dev *dev) { - return pci_find_capability(dev, PCI_CAP_ID_EXP); + return pci_find_capability(dev, PCI_CAP_ID_EXP); } static inline int pci_read_config_byte(struct pci_dev *pdev, int where, u8 *val) { *val = (u8)pci_read_config(pdev->dev.bsddev, where, 1); return (0); } static inline int pci_read_config_word(struct pci_dev *pdev, int where, u16 *val) { *val = (u16)pci_read_config(pdev->dev.bsddev, where, 2); return (0); } static inline int pci_read_config_dword(struct pci_dev *pdev, int where, u32 *val) { *val = (u32)pci_read_config(pdev->dev.bsddev, where, 4); return (0); } static inline int pci_write_config_byte(struct pci_dev *pdev, int where, u8 val) { pci_write_config(pdev->dev.bsddev, where, val, 1); return (0); } static inline int pci_write_config_word(struct pci_dev *pdev, int where, u16 val) { pci_write_config(pdev->dev.bsddev, where, val, 2); return (0); } static inline int pci_write_config_dword(struct pci_dev *pdev, int where, u32 val) { pci_write_config(pdev->dev.bsddev, where, val, 4); return (0); } int linux_pci_register_driver(struct pci_driver *pdrv); int linux_pci_register_drm_driver(struct pci_driver *pdrv); void linux_pci_unregister_driver(struct pci_driver *pdrv); #define pci_register_driver(pdrv) linux_pci_register_driver(pdrv) #define pci_unregister_driver(pdrv) linux_pci_unregister_driver(pdrv) struct msix_entry { int entry; int vector; }; /* * Enable MSI-X. A positive return value indicates the actual number of * available vectors; a negative return value indicates failure. * * NB: define added to prevent this definition of pci_enable_msix from * clashing with the native FreeBSD version.
*/ #define pci_enable_msix linux_pci_enable_msix static inline int pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq) { struct resource_list_entry *rle; int error; int avail; int i; avail = pci_msix_count(pdev->dev.bsddev); if (avail < nreq) { if (avail == 0) return -EINVAL; return avail; } avail = nreq; if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0) return error; /* * Handle the case where "pci_alloc_msix()" may allocate fewer * interrupts than requested and return with no error: */ if (avail < nreq) { pci_release_msi(pdev->dev.bsddev); return avail; } rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1); pdev->dev.msix = rle->start; pdev->dev.msix_max = rle->start + avail; for (i = 0; i < nreq; i++) entries[i].vector = pdev->dev.msix + i; return (0); } #define pci_enable_msix_range linux_pci_enable_msix_range static inline int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) { int nvec = maxvec; int rc; if (maxvec < minvec) return (-ERANGE); do { rc = pci_enable_msix(dev, entries, nvec); if (rc < 0) { return (rc); } else if (rc > 0) { if (rc < minvec) return (-ENOSPC); nvec = rc; } } while (rc); return (nvec); } static inline int pci_channel_offline(struct pci_dev *pdev) { - return false; + return false; } static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { - return -ENODEV; + return -ENODEV; } static inline void pci_disable_sriov(struct pci_dev *dev) { } #define DEFINE_PCI_DEVICE_TABLE(_table) \ const struct pci_device_id _table[] __devinitdata /* XXX This should not be necessary. */ #define pcix_set_mmrbc(d, v) 0 #define pcix_get_max_mmrbc(d) 0 #define pcie_set_readrq(d, v) 0 #define PCI_DMA_BIDIRECTIONAL 0 #define PCI_DMA_TODEVICE 1 #define PCI_DMA_FROMDEVICE 2 #define PCI_DMA_NONE 3 #define pci_pool dma_pool #define pci_pool_destroy(...) dma_pool_destroy(__VA_ARGS__) #define pci_pool_alloc(...) dma_pool_alloc(__VA_ARGS__) #define pci_pool_free(...) dma_pool_free(__VA_ARGS__) #define pci_pool_create(_name, _pdev, _size, _align, _alloc) \ dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc) #define pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle) \ dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \ _size, _vaddr, _dma_handle) #define pci_map_sg(_hwdev, _sg, _nents, _dir) \ dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev), \ _sg, _nents, (enum dma_data_direction)_dir) #define pci_map_single(_hwdev, _ptr, _size, _dir) \ dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev), \ (_ptr), (_size), (enum dma_data_direction)_dir) #define pci_unmap_single(_hwdev, _addr, _size, _dir) \ dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \ _addr, _size, (enum dma_data_direction)_dir) #define pci_unmap_sg(_hwdev, _sg, _nents, _dir) \ dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \ _sg, _nents, (enum dma_data_direction)_dir) #define pci_map_page(_hwdev, _page, _offset, _size, _dir) \ dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,\ _offset, _size, (enum dma_data_direction)_dir) #define pci_unmap_page(_hwdev, _dma_address, _size, _dir) \ dma_unmap_page((_hwdev) == NULL ?
NULL : &(_hwdev)->dev, \ _dma_address, _size, (enum dma_data_direction)_dir) #define pci_set_dma_mask(_pdev, mask) dma_set_mask(&(_pdev)->dev, (mask)) #define pci_dma_mapping_error(_pdev, _dma_addr) \ dma_mapping_error(&(_pdev)->dev, _dma_addr) #define pci_set_consistent_dma_mask(_pdev, _mask) \ dma_set_coherent_mask(&(_pdev)->dev, (_mask)) #define DECLARE_PCI_UNMAP_ADDR(x) DEFINE_DMA_UNMAP_ADDR(x); #define DECLARE_PCI_UNMAP_LEN(x) DEFINE_DMA_UNMAP_LEN(x); #define pci_unmap_addr dma_unmap_addr #define pci_unmap_addr_set dma_unmap_addr_set #define pci_unmap_len dma_unmap_len #define pci_unmap_len_set dma_unmap_len_set typedef unsigned int __bitwise pci_channel_state_t; typedef unsigned int __bitwise pci_ers_result_t; enum pci_channel_state { - pci_channel_io_normal = 1, - pci_channel_io_frozen = 2, - pci_channel_io_perm_failure = 3, + pci_channel_io_normal = 1, + pci_channel_io_frozen = 2, + pci_channel_io_perm_failure = 3, }; enum pci_ers_result { - PCI_ERS_RESULT_NONE = 1, - PCI_ERS_RESULT_CAN_RECOVER = 2, - PCI_ERS_RESULT_NEED_RESET = 3, - PCI_ERS_RESULT_DISCONNECT = 4, - PCI_ERS_RESULT_RECOVERED = 5, + PCI_ERS_RESULT_NONE = 1, + PCI_ERS_RESULT_CAN_RECOVER = 2, + PCI_ERS_RESULT_NEED_RESET = 3, + PCI_ERS_RESULT_DISCONNECT = 4, + PCI_ERS_RESULT_RECOVERED = 5, }; /* PCI bus error event callbacks */ struct pci_error_handlers { - pci_ers_result_t (*error_detected)(struct pci_dev *dev, - enum pci_channel_state error); - pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); - pci_ers_result_t (*link_reset)(struct pci_dev *dev); - pci_ers_result_t (*slot_reset)(struct pci_dev *dev); - void (*resume)(struct pci_dev *dev); + pci_ers_result_t (*error_detected)(struct pci_dev *dev, + enum pci_channel_state error); + pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); + pci_ers_result_t (*link_reset)(struct pci_dev *dev); + pci_ers_result_t (*slot_reset)(struct pci_dev *dev); + void (*resume)(struct pci_dev *dev); }; /* FreeBSD does not support SRIOV - yet */ static inline struct pci_dev *pci_physfn(struct pci_dev *dev) { - return dev; + return dev; } static inline bool pci_is_pcie(struct pci_dev *dev) { - return !!pci_pcie_cap(dev); + return !!pci_pcie_cap(dev); } static inline u16 pcie_flags_reg(struct pci_dev *dev) { - int pos; - u16 reg16; + int pos; + u16 reg16; - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (!pos) - return 0; + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return 0; - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); + pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); - return reg16; + return reg16; } static inline int pci_pcie_type(struct pci_dev *dev) { - return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; + return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } static inline int pcie_cap_version(struct pci_dev *dev) { - return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS; + return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS; } static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev) { - int type = pci_pcie_type(dev); + int type = pci_pcie_type(dev); - return pcie_cap_version(dev) > 1 || - type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_ENDPOINT || - type == PCI_EXP_TYPE_LEG_END; + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; } static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) { - return true; + return true; } static inline bool pcie_cap_has_sltctl(struct pci_dev *dev) { - int type = pci_pcie_type(dev); + int type =
pci_pcie_type(dev); - return pcie_cap_version(dev) > 1 || - type == PCI_EXP_TYPE_ROOT_PORT || - (type == PCI_EXP_TYPE_DOWNSTREAM && - pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT); + return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT); } static inline bool pcie_cap_has_rtctl(struct pci_dev *dev) { - int type = pci_pcie_type(dev); + int type = pci_pcie_type(dev); - return pcie_cap_version(dev) > 1 || - type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_RC_EC; + return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; } static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) { - if (!pci_is_pcie(dev)) - return false; + if (!pci_is_pcie(dev)) + return false; - switch (pos) { - case PCI_EXP_FLAGS_TYPE: - return true; - case PCI_EXP_DEVCAP: - case PCI_EXP_DEVCTL: - case PCI_EXP_DEVSTA: - return pcie_cap_has_devctl(dev); - case PCI_EXP_LNKCAP: - case PCI_EXP_LNKCTL: - case PCI_EXP_LNKSTA: - return pcie_cap_has_lnkctl(dev); - case PCI_EXP_SLTCAP: - case PCI_EXP_SLTCTL: - case PCI_EXP_SLTSTA: - return pcie_cap_has_sltctl(dev); - case PCI_EXP_RTCTL: - case PCI_EXP_RTCAP: - case PCI_EXP_RTSTA: - return pcie_cap_has_rtctl(dev); - case PCI_EXP_DEVCAP2: - case PCI_EXP_DEVCTL2: - case PCI_EXP_LNKCAP2: - case PCI_EXP_LNKCTL2: - case PCI_EXP_LNKSTA2: - return pcie_cap_version(dev) > 1; - default: - return false; - } + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return pcie_cap_version(dev) > 1; + default: + return false; + } } static inline int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *dst) { - if (pos & 3) - return -EINVAL; + if (pos & 3) + return -EINVAL; - if (!pcie_capability_reg_implemented(dev, pos)) - return -EINVAL; + if (!pcie_capability_reg_implemented(dev, pos)) + return -EINVAL; - return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst); + return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst); } static inline int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *dst) { - if (pos & 3) - return -EINVAL; + if (pos & 3) + return -EINVAL; - if (!pcie_capability_reg_implemented(dev, pos)) - return -EINVAL; + if (!pcie_capability_reg_implemented(dev, pos)) + return -EINVAL; - return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst); + return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst); } static inline int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) { - if (pos & 1) - return -EINVAL; + if (pos & 1) + return -EINVAL; - if (!pcie_capability_reg_implemented(dev, pos)) - return 0; + if (!pcie_capability_reg_implemented(dev, pos)) + return 0; - return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); } static inline int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width) { *speed = PCI_SPEED_UNKNOWN; 
*width = PCIE_LNK_WIDTH_UNKNOWN; return (0); } static inline int pci_num_vf(struct pci_dev *dev) { return (0); } #endif /* _LINUX_PCI_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/rwlock.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/rwlock.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/rwlock.h (revision 331433) @@ -1,68 +1,68 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_RWLOCK_H_ #define _LINUX_RWLOCK_H_ #include #include #include #include typedef struct { struct rwlock rw; } rwlock_t; #define read_lock(_l) rw_rlock(&(_l)->rw) #define write_lock(_l) rw_wlock(&(_l)->rw) #define read_unlock(_l) rw_runlock(&(_l)->rw) #define write_unlock(_l) rw_wunlock(&(_l)->rw) #define read_lock_irq(lock) read_lock((lock)) #define read_unlock_irq(lock) read_unlock((lock)) #define write_lock_irq(lock) write_lock((lock)) #define write_unlock_irq(lock) write_unlock((lock)) -#define read_lock_irqsave(lock, flags) \ +#define read_lock_irqsave(lock, flags) \ do {(flags) = 0; read_lock(lock); } while (0) -#define write_lock_irqsave(lock, flags) \ +#define write_lock_irqsave(lock, flags) \ do {(flags) = 0; write_lock(lock); } while (0) #define read_unlock_irqrestore(lock, flags) \ do { read_unlock(lock); } while (0) #define write_unlock_irqrestore(lock, flags) \ do { write_unlock(lock); } while (0) static inline void rwlock_init(rwlock_t *lock) { memset(&lock->rw, 0, sizeof(lock->rw)); rw_init_flags(&lock->rw, "lnxrw", RW_NOWITNESS); } #endif /* _LINUX_RWLOCK_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/rwsem.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/rwsem.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/rwsem.h (revision 331433) @@ -1,84 +1,84 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. 
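A minimal usage sketch of the rwlock.h shims above (all names hypothetical); note that the _irqsave variants only zero the flags word before taking the underlying rw(9) lock:

	#include <linux/rwlock.h>

	/* Hypothetical reader-mostly value behind a LinuxKPI rwlock. */
	static rwlock_t example_lock;
	static int example_value;

	static inline void
	example_init(void)
	{
		/* rw_init_flags() with RW_NOWITNESS underneath. */
		rwlock_init(&example_lock);
	}

	static inline int
	example_read(void)
	{
		unsigned long flags;
		int v;

		/* "flags" is only zeroed; rw_rlock() does the real work. */
		read_lock_irqsave(&example_lock, flags);
		v = example_value;
		read_unlock_irqrestore(&example_lock, flags);
		return (v);
	}

	static inline void
	example_write(int v)
	{
		write_lock(&example_lock);
		example_value = v;
		write_unlock(&example_lock);
	}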
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_RWSEM_H_ #define _LINUX_RWSEM_H_ #include #include #include #include #include struct rw_semaphore { struct sx sx; }; #define down_write(_rw) sx_xlock(&(_rw)->sx) #define up_write(_rw) sx_xunlock(&(_rw)->sx) #define down_read(_rw) sx_slock(&(_rw)->sx) #define up_read(_rw) sx_sunlock(&(_rw)->sx) #define down_read_trylock(_rw) !!sx_try_slock(&(_rw)->sx) #define down_write_trylock(_rw) !!sx_try_xlock(&(_rw)->sx) #define down_write_killable(_rw) linux_down_write_killable(_rw) #define downgrade_write(_rw) sx_downgrade(&(_rw)->sx) #define down_read_nested(_rw, _sc) down_read(_rw) #define init_rwsem(_rw) linux_init_rwsem(_rw, rwsem_name("lnxrwsem")) #ifdef WITNESS_ALL /* NOTE: the maximum WITNESS name is 64 chars */ #define __rwsem_name(name, file, line) \ - (((const char *){file ":" #line "-" name}) + \ + (((const char *){file ":" #line "-" name}) + \ (sizeof(file) > 16 ? sizeof(file) - 16 : 0)) #else #define __rwsem_name(name, file, line) name #endif #define _rwsem_name(...) __rwsem_name(__VA_ARGS__) #define rwsem_name(name) _rwsem_name(name, __FILE__, __LINE__) #define DECLARE_RWSEM(name) \ struct rw_semaphore name; \ static void name##_rwsem_init(void *arg) \ { \ linux_init_rwsem(&name, rwsem_name(#name)); \ } \ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_rwsem_init, NULL) static inline void linux_init_rwsem(struct rw_semaphore *rw, const char *name) { memset(rw, 0, sizeof(*rw)); sx_init_flags(&rw->sx, name, SX_NOWITNESS); } extern int linux_down_write_killable(struct rw_semaphore *); #endif /* _LINUX_RWSEM_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/slab.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/slab.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/slab.h (revision 331433) @@ -1,176 +1,176 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_SLAB_H_ #define _LINUX_SLAB_H_ #include #include #include #include #include #include #include MALLOC_DECLARE(M_KMALLOC); #define kvmalloc(size) kmalloc(size, 0) #define kzalloc(size, flags) kmalloc(size, (flags) | __GFP_ZERO) #define kzalloc_node(size, flags, node) kmalloc(size, (flags) | __GFP_ZERO) #define kfree_const(ptr) kfree(ptr) #define vzalloc(size) __vmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0) #define vfree(arg) kfree(arg) #define kvfree(arg) kfree(arg) -#define vmalloc_node(size, node) __vmalloc(size, GFP_KERNEL, 0) -#define vmalloc_user(size) __vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0) -#define vmalloc(size) __vmalloc(size, GFP_KERNEL, 0) +#define vmalloc_node(size, node) __vmalloc(size, GFP_KERNEL, 0) +#define vmalloc_user(size) __vmalloc(size, GFP_KERNEL | __GFP_ZERO, 0) +#define vmalloc(size) __vmalloc(size, GFP_KERNEL, 0) #define __kmalloc(...) kmalloc(__VA_ARGS__) #define kmalloc_node(chunk, flags, n) kmalloc(chunk, flags) /* * Prefix some functions with linux_ to avoid namespace conflict * with the OpenSolaris code in the kernel. */ #define kmem_cache linux_kmem_cache #define kmem_cache_create(...) linux_kmem_cache_create(__VA_ARGS__) #define kmem_cache_alloc(...) linux_kmem_cache_alloc(__VA_ARGS__) -#define kmem_cache_free(...) linux_kmem_cache_free(__VA_ARGS__) +#define kmem_cache_free(...) linux_kmem_cache_free(__VA_ARGS__) #define kmem_cache_destroy(...) 
linux_kmem_cache_destroy(__VA_ARGS__) #define KMEM_CACHE(__struct, flags) \ linux_kmem_cache_create(#__struct, sizeof(struct __struct), \ __alignof(struct __struct), (flags), NULL) typedef void linux_kmem_ctor_t (void *); struct linux_kmem_cache { uma_zone_t cache_zone; linux_kmem_ctor_t *cache_ctor; unsigned cache_flags; unsigned cache_size; }; #define SLAB_HWCACHE_ALIGN (1 << 0) -#define SLAB_TYPESAFE_BY_RCU (1 << 1) +#define SLAB_TYPESAFE_BY_RCU (1 << 1) #define SLAB_RECLAIM_ACCOUNT (1 << 2) #define SLAB_DESTROY_BY_RCU \ SLAB_TYPESAFE_BY_RCU static inline gfp_t linux_check_m_flags(gfp_t flags) { const gfp_t m = M_NOWAIT | M_WAITOK; /* make sure either M_NOWAIT or M_WAITOK is set */ if ((flags & m) == 0) flags |= M_NOWAIT; else if ((flags & m) == m) flags &= ~M_WAITOK; /* mask away LinuxKPI specific flags */ return (flags & GFP_NATIVE_MASK); } static inline void * kmalloc(size_t size, gfp_t flags) { return (malloc(size, M_KMALLOC, linux_check_m_flags(flags))); } static inline void * kcalloc(size_t n, size_t size, gfp_t flags) { flags |= __GFP_ZERO; return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags))); } static inline void * __vmalloc(size_t size, gfp_t flags, int other) { return (malloc(size, M_KMALLOC, linux_check_m_flags(flags))); } static inline void * vmalloc_32(size_t size) { return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1)); } static inline void * kmalloc_array(size_t n, size_t size, gfp_t flags) { return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags))); } static inline void * krealloc(void *ptr, size_t size, gfp_t flags) { return (realloc(ptr, size, M_KMALLOC, linux_check_m_flags(flags))); } static inline void kfree(const void *ptr) { free(__DECONST(void *, ptr), M_KMALLOC); } extern struct linux_kmem_cache *linux_kmem_cache_create(const char *name, size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor); static inline void * linux_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags) { return (uma_zalloc_arg(c->cache_zone, c, linux_check_m_flags(flags))); } static inline void * kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags) { return (uma_zalloc_arg(c->cache_zone, c, linux_check_m_flags(flags | M_ZERO))); } extern void linux_kmem_cache_free_rcu(struct linux_kmem_cache *, void *); static inline void linux_kmem_cache_free(struct linux_kmem_cache *c, void *m) { if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) linux_kmem_cache_free_rcu(c, m); else uma_zfree(c->cache_zone, m); } extern void linux_kmem_cache_destroy(struct linux_kmem_cache *); #endif /* _LINUX_SLAB_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/spinlock.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/spinlock.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/spinlock.h (revision 331433) @@ -1,163 +1,163 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_SPINLOCK_H_ #define _LINUX_SPINLOCK_H_ #include #include #include #include #include #include #include #include typedef struct { struct mtx m; } spinlock_t; /* * By defining CONFIG_SPIN_SKIP LinuxKPI spinlocks and asserts will be * skipped during panic(). By default it is disabled due to * performance reasons. */ #ifdef CONFIG_SPIN_SKIP #define SPIN_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active) #else #define SPIN_SKIP(void) 0 #endif #define spin_lock(_l) do { \ if (SPIN_SKIP()) \ break; \ mtx_lock(&(_l)->m); \ local_bh_disable(); \ } while (0) #define spin_lock_bh(_l) do { \ spin_lock(_l); \ } while (0) #define spin_lock_irq(_l) do { \ spin_lock(_l); \ } while (0) #define spin_unlock(_l) do { \ if (SPIN_SKIP()) \ break; \ local_bh_enable(); \ mtx_unlock(&(_l)->m); \ } while (0) #define spin_unlock_bh(_l) do { \ spin_unlock(_l); \ } while (0) #define spin_unlock_irq(_l) do { \ spin_unlock(_l); \ } while (0) #define spin_trylock(_l) ({ \ int __ret; \ if (SPIN_SKIP()) { \ __ret = 1; \ } else { \ __ret = mtx_trylock(&(_l)->m); \ if (likely(__ret != 0)) \ local_bh_disable(); \ } \ __ret; \ }) #define spin_trylock_irq(_l) \ spin_trylock(_l) #define spin_lock_nested(_l, _n) do { \ if (SPIN_SKIP()) \ break; \ mtx_lock_flags(&(_l)->m, MTX_DUPOK); \ local_bh_disable(); \ } while (0) #define spin_lock_irqsave(_l, flags) do { \ (flags) = 0; \ spin_lock(_l); \ } while (0) #define spin_lock_irqsave_nested(_l, flags, _n) do { \ (flags) = 0; \ spin_lock_nested(_l, _n); \ } while (0) #define spin_unlock_irqrestore(_l, flags) do { \ spin_unlock(_l); \ } while (0) #ifdef WITNESS_ALL /* NOTE: the maximum WITNESS name is 64 chars */ #define __spin_lock_name(name, file, line) \ - (((const char *){file ":" #line "-" name}) + \ + (((const char *){file ":" #line "-" name}) + \ (sizeof(file) > 16 ? sizeof(file) - 16 : 0)) #else #define __spin_lock_name(name, file, line) name #endif #define _spin_lock_name(...) 
__spin_lock_name(__VA_ARGS__) #define spin_lock_name(name) _spin_lock_name(name, __FILE__, __LINE__) #define spin_lock_init(lock) linux_spin_lock_init(lock, spin_lock_name("lnxspin")) static inline void linux_spin_lock_init(spinlock_t *lock, const char *name) { memset(lock, 0, sizeof(*lock)); mtx_init(&lock->m, name, NULL, MTX_DEF | MTX_NOWITNESS); } static inline void spin_lock_destroy(spinlock_t *lock) { mtx_destroy(&lock->m); } #define DEFINE_SPINLOCK(lock) \ spinlock_t lock; \ MTX_SYSINIT(lock, &(lock).m, spin_lock_name("lnxspin"), MTX_DEF) #define assert_spin_locked(_l) do { \ if (SPIN_SKIP()) \ break; \ mtx_assert(&(_l)->m, MA_OWNED); \ } while (0) #endif /* _LINUX_SPINLOCK_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/sysfs.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/sysfs.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/sysfs.h (revision 331433) @@ -1,197 +1,197 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_SYSFS_H_ #define _LINUX_SYSFS_H_ #include #include #include #include struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); }; struct attribute_group { const char *name; - mode_t (*is_visible)(struct kobject *, + mode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; }; #define __ATTR(_name, _mode, _show, _store) { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ - .show = _show, .store = _store, \ + .show = _show, .store = _store, \ } #define __ATTR_RO(_name) __ATTR(_name, 0444, _name##_show, NULL) #define __ATTR_WO(_name) __ATTR(_name, 0200, NULL, _name##_store) #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store) #define __ATTR_NULL { .attr = { .name = NULL } } #define ATTRIBUTE_GROUPS(_name) \ static struct attribute_group _name##_group = { \ .attrs = _name##_attrs, \ }; \ static struct attribute_group *_name##_groups[] = { \ &_name##_group, \ NULL, \ }; /* * Handle our generic '\0' terminated 'C' string. 
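A minimal usage sketch of the spinlock shims above (all names hypothetical); spin_lock_irqsave() zeroes the flags word and maps to mtx_lock() plus local_bh_disable():

	#include <linux/spinlock.h>

	/* Hypothetical counter updated from several contexts. */
	static DEFINE_SPINLOCK(example_spin);
	static unsigned int example_pending;

	static inline void
	example_produce(void)
	{
		unsigned long flags;

		/* The underlying lock is a plain mtx(9). */
		spin_lock_irqsave(&example_spin, flags);
		example_pending++;
		spin_unlock_irqrestore(&example_spin, flags);
	}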
* Two cases: * a variable string: point arg1 at it, arg2 is max length. * a constant string: point arg1 at it, arg2 is zero. */ static inline int sysctl_handle_attr(SYSCTL_HANDLER_ARGS) { struct kobject *kobj; struct attribute *attr; const struct sysfs_ops *ops; char *buf; int error; ssize_t len; kobj = arg1; attr = (struct attribute *)(intptr_t)arg2; if (kobj->ktype == NULL || kobj->ktype->sysfs_ops == NULL) return (ENODEV); buf = (char *)get_zeroed_page(GFP_KERNEL); if (buf == NULL) return (ENOMEM); ops = kobj->ktype->sysfs_ops; if (ops->show) { len = ops->show(kobj, attr, buf); /* * It's valid to not have a 'show' so just return an * empty string. - */ + */ if (len < 0) { error = -len; if (error != EIO) goto out; buf[0] = '\0'; } else if (len) { len--; if (len >= PAGE_SIZE) len = PAGE_SIZE - 1; /* Trim trailing newline. */ buf[len] = '\0'; } } /* Leave one trailing byte to append a newline. */ error = sysctl_handle_string(oidp, buf, PAGE_SIZE - 1, req); if (error != 0 || req->newptr == NULL || ops->store == NULL) goto out; len = strlcat(buf, "\n", PAGE_SIZE); KASSERT(len < PAGE_SIZE, ("new attribute truncated")); len = ops->store(kobj, attr, buf, len); if (len < 0) error = -len; out: free_page((unsigned long)buf); return (error); } static inline int sysfs_create_file(struct kobject *kobj, const struct attribute *attr) { SYSCTL_ADD_OID(NULL, SYSCTL_CHILDREN(kobj->oidp), OID_AUTO, attr->name, CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE, kobj, (uintptr_t)attr, sysctl_handle_attr, "A", ""); return (0); } static inline void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr) { if (kobj->oidp) sysctl_remove_name(kobj->oidp, attr->name, 1, 1); } static inline void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp) { if (kobj->oidp) sysctl_remove_name(kobj->oidp, grp->name, 1, 1); } static inline int sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp) { struct attribute **attr; struct sysctl_oid *oidp; oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(kobj->oidp), OID_AUTO, grp->name, CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, grp->name); for (attr = grp->attrs; *attr != NULL; attr++) { SYSCTL_ADD_OID(NULL, SYSCTL_CHILDREN(oidp), OID_AUTO, (*attr)->name, CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE, kobj, (uintptr_t)*attr, sysctl_handle_attr, "A", ""); } return (0); } static inline int sysfs_create_dir(struct kobject *kobj) { kobj->oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(kobj->parent->oidp), OID_AUTO, kobj->name, CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, kobj->name); - return (0); + return (0); } static inline void sysfs_remove_dir(struct kobject *kobj) { if (kobj->oidp == NULL) return; sysctl_remove_oid(kobj->oidp, 1, 1); } #define sysfs_attr_init(attr) do {} while(0) #endif /* _LINUX_SYSFS_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/usb.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/usb.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/usb.h (revision 331433) @@ -1,319 +1,318 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2007 Luigi Rizzo - Universita` di Pisa. All rights reserved. * Copyright (c) 2007 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _USB_COMPAT_LINUX_H #define _USB_COMPAT_LINUX_H #include #include #include #include #include #include #include struct usb_device; struct usb_interface; struct usb_driver; struct urb; typedef void *pm_message_t; typedef void (usb_complete_t)(struct urb *); #define USB_MAX_FULL_SPEED_ISOC_FRAMES (60 * 1) #define USB_MAX_HIGH_SPEED_ISOC_FRAMES (60 * 8) #define USB_DEVICE_ID_MATCH_DEVICE \ (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT) #define USB_DEVICE(vend,prod) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = (vend), \ .idProduct = (prod) /* The "usb_driver" structure holds the Linux USB device driver * callbacks, and a pointer to device ID's which this entry should * match against. Usually this entry is exposed to the USB emulation * layer using the "USB_DRIVER_EXPORT()" macro, which is defined * below. 
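 *
 * Editor's sketch (not part of the original header): the "foo" names,
 * vendor/product IDs and callbacks below are hypothetical, and
 * "struct usb_device_id" is assumed to be declared elsewhere in the
 * KPI. A driver would typically be hooked up like this:
 *
 *	static const struct usb_device_id foo_id_table[] = {
 *		{USB_DEVICE(0x1234, 0x5678)},
 *		{}
 *	};
 *
 *	static struct usb_driver foo_driver = {
 *		.name = "foo",
 *		.probe = foo_probe,
 *		.disconnect = foo_disconnect,
 *		.id_table = foo_id_table,
 *	};
 *
 *	USB_DRIVER_EXPORT(foo, &foo_driver);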
*/ struct usb_driver { const char *name; - int (*probe) (struct usb_interface *intf, - const struct usb_device_id *id); + int (*probe)(struct usb_interface *intf, + const struct usb_device_id *id); - void (*disconnect) (struct usb_interface *intf); + void (*disconnect)(struct usb_interface *intf); - int (*ioctl) (struct usb_interface *intf, unsigned int code, - void *buf); + int (*ioctl)(struct usb_interface *intf, unsigned int code, void *buf); - int (*suspend) (struct usb_interface *intf, pm_message_t message); - int (*resume) (struct usb_interface *intf); + int (*suspend)(struct usb_interface *intf, pm_message_t message); + int (*resume)(struct usb_interface *intf); const struct usb_device_id *id_table; - void (*shutdown) (struct usb_interface *intf); + void (*shutdown)(struct usb_interface *intf); LIST_ENTRY(usb_driver) linux_driver_list; }; #define USB_DRIVER_EXPORT(id,p_usb_drv) \ SYSINIT(id,SI_SUB_KLD,SI_ORDER_FIRST,usb_linux_register,p_usb_drv); \ SYSUNINIT(id,SI_SUB_KLD,SI_ORDER_ANY,usb_linux_deregister,p_usb_drv) #define USB_DT_ENDPOINT_SIZE 7 #define USB_DT_ENDPOINT_AUDIO_SIZE 9 /* * Endpoints */ #define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */ #define USB_ENDPOINT_DIR_MASK 0x80 #define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */ #define USB_ENDPOINT_XFER_CONTROL 0 #define USB_ENDPOINT_XFER_ISOC 1 #define USB_ENDPOINT_XFER_BULK 2 #define USB_ENDPOINT_XFER_INT 3 #define USB_ENDPOINT_MAX_ADJUSTABLE 0x80 /* CONTROL REQUEST SUPPORT */ /* * Definition of direction mask for * "bEndpointAddress" and "bmRequestType": */ #define USB_DIR_MASK 0x80 #define USB_DIR_OUT 0x00 /* write to USB device */ #define USB_DIR_IN 0x80 /* read from USB device */ /* * Definition of type mask for * "bmRequestType": */ #define USB_TYPE_MASK (0x03 << 5) #define USB_TYPE_STANDARD (0x00 << 5) #define USB_TYPE_CLASS (0x01 << 5) #define USB_TYPE_VENDOR (0x02 << 5) #define USB_TYPE_RESERVED (0x03 << 5) /* * Definition of receiver mask for * "bmRequestType": */ #define USB_RECIP_MASK 0x1f #define USB_RECIP_DEVICE 0x00 #define USB_RECIP_INTERFACE 0x01 #define USB_RECIP_ENDPOINT 0x02 #define USB_RECIP_OTHER 0x03 /* * Definition of standard request values for * "bRequest": */ #define USB_REQ_GET_STATUS 0x00 #define USB_REQ_CLEAR_FEATURE 0x01 #define USB_REQ_SET_FEATURE 0x03 #define USB_REQ_SET_ADDRESS 0x05 #define USB_REQ_GET_DESCRIPTOR 0x06 #define USB_REQ_SET_DESCRIPTOR 0x07 #define USB_REQ_GET_CONFIGURATION 0x08 #define USB_REQ_SET_CONFIGURATION 0x09 #define USB_REQ_GET_INTERFACE 0x0A #define USB_REQ_SET_INTERFACE 0x0B #define USB_REQ_SYNCH_FRAME 0x0C #define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */ #define USB_REQ_GET_ENCRYPTION 0x0E #define USB_REQ_SET_HANDSHAKE 0x0F #define USB_REQ_GET_HANDSHAKE 0x10 #define USB_REQ_SET_CONNECTION 0x11 #define USB_REQ_SET_SECURITY_DATA 0x12 #define USB_REQ_GET_SECURITY_DATA 0x13 #define USB_REQ_SET_WUSB_DATA 0x14 #define USB_REQ_LOOPBACK_DATA_WRITE 0x15 #define USB_REQ_LOOPBACK_DATA_READ 0x16 #define USB_REQ_SET_INTERFACE_DS 0x17 /* * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and * are read as a bit array returned by USB_REQ_GET_STATUS. (So there * are at most sixteen features of each type.) 
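 *
 * Editor's sketch (not in the original header; "udev" and the 1000 ms
 * timeout are hypothetical): combining the request, type and recipient
 * definitions above with the pipe macros defined further below, the
 * two status bytes holding these feature bits can be read with:
 *
 *	uint8_t status[2];
 *
 *	usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 *	    USB_REQ_GET_STATUS,
 *	    USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
 *	    0, 0, status, sizeof(status), 1000);
 *
 * Bit USB_DEVICE_SELF_POWERED of status[0] (defined just below) then
 * reports whether the device is self-powered.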
*/
#define	USB_DEVICE_SELF_POWERED		0	/* (read only) */
#define	USB_DEVICE_REMOTE_WAKEUP	1	/* dev may initiate wakeup */
#define	USB_DEVICE_TEST_MODE		2	/* (wired high speed only) */
#define	USB_DEVICE_BATTERY		2	/* (wireless) */
#define	USB_DEVICE_B_HNP_ENABLE		3	/* (otg) dev may initiate HNP */
#define	USB_DEVICE_WUSB_DEVICE		3	/* (wireless) */
#define	USB_DEVICE_A_HNP_SUPPORT	4	/* (otg) RH port supports HNP */
#define	USB_DEVICE_A_ALT_HNP_SUPPORT	5	/* (otg) other RH port does */
#define	USB_DEVICE_DEBUG_MODE		6	/* (special devices only) */

#define	USB_ENDPOINT_HALT		0	/* IN/OUT will STALL */

#define	PIPE_ISOCHRONOUS		0x01	/* UE_ISOCHRONOUS */
#define	PIPE_INTERRUPT			0x03	/* UE_INTERRUPT */
#define	PIPE_CONTROL			0x00	/* UE_CONTROL */
#define	PIPE_BULK			0x02	/* UE_BULK */

/* Whenever Linux references a USB endpoint:
 * a) to initialize "urb->endpoint"
 * b) second argument passed to "usb_control_msg()"
 *
 * Then it uses one of the following macros. The "endpoint" argument
 * is the physical endpoint value masked by 0xF. The "dev" argument
 * is a pointer to "struct usb_device".
 */
#define	usb_sndctrlpipe(dev,endpoint) \
	usb_find_host_endpoint(dev, PIPE_CONTROL, (endpoint) | USB_DIR_OUT)
#define	usb_rcvctrlpipe(dev,endpoint) \
	usb_find_host_endpoint(dev, PIPE_CONTROL, (endpoint) | USB_DIR_IN)
#define	usb_sndisocpipe(dev,endpoint) \
	usb_find_host_endpoint(dev, PIPE_ISOCHRONOUS, (endpoint) | USB_DIR_OUT)
#define	usb_rcvisocpipe(dev,endpoint) \
	usb_find_host_endpoint(dev, PIPE_ISOCHRONOUS, (endpoint) | USB_DIR_IN)
#define	usb_sndbulkpipe(dev,endpoint) \
	usb_find_host_endpoint(dev, PIPE_BULK, (endpoint) | USB_DIR_OUT)
#define	usb_rcvbulkpipe(dev,endpoint) \
	usb_find_host_endpoint(dev, PIPE_BULK, (endpoint) | USB_DIR_IN)
#define	usb_sndintpipe(dev,endpoint) \
	usb_find_host_endpoint(dev, PIPE_INTERRUPT, (endpoint) | USB_DIR_OUT)
#define	usb_rcvintpipe(dev,endpoint) \
	usb_find_host_endpoint(dev, PIPE_INTERRUPT, (endpoint) | USB_DIR_IN)

/*
 * The following structure is used to extend "struct urb" when we are
 * dealing with an isochronous endpoint. It contains information about
 * the data offset and data length of an isochronous packet.
 * The "actual_length" field is updated before the "complete"
 * callback in the "urb" structure is called.
 */
struct usb_iso_packet_descriptor {
	uint32_t offset;		/* deprecated buffer offset (the
					 * packets are usually back to back) */
	uint16_t length;		/* expected length */
	uint16_t actual_length;
	int16_t	status;			/* transfer status */
};

/*
 * The following structure holds various information about a USB
 * transfer. This structure is used for all kinds of USB transfers.
 *
 * URB is short for USB Request Block.
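 *
 * Editor's usage sketch (hypothetical "udev", "buf", "buflen",
 * "foo_complete" and "foo_ctx"; error handling omitted), built only
 * from the prototypes declared below:
 *
 *	struct urb *urb = usb_alloc_urb(0, 0);
 *
 *	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 1),
 *	    buf, buflen, foo_complete, foo_ctx);
 *	usb_submit_urb(urb, 0);
 *
 * On completion "foo_complete" receives the URB; "status" holds the
 * result and "actual_length" the number of bytes transferred.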
*/ struct urb { TAILQ_ENTRY(urb) bsd_urb_list; struct cv cv_wait; struct usb_device *dev; /* (in) pointer to associated device */ struct usb_host_endpoint *endpoint; /* (in) pipe pointer */ uint8_t *setup_packet; /* (in) setup packet (control only) */ uint8_t *bsd_data_ptr; void *transfer_buffer; /* (in) associated data buffer */ void *context; /* (in) context for completion */ usb_complete_t *complete; /* (in) completion routine */ usb_size_t transfer_buffer_length;/* (in) data buffer length */ usb_size_t bsd_length_rem; usb_size_t actual_length; /* (return) actual transfer length */ usb_timeout_t timeout; /* FreeBSD specific */ uint16_t transfer_flags; /* (in) */ #define URB_SHORT_NOT_OK 0x0001 /* report short transfers like errors */ #define URB_ISO_ASAP 0x0002 /* ignore "start_frame" field */ #define URB_ZERO_PACKET 0x0004 /* the USB transfer ends with a short * packet */ #define URB_NO_TRANSFER_DMA_MAP 0x0008 /* "transfer_dma" is valid on submit */ #define URB_WAIT_WAKEUP 0x0010 /* custom flags */ #define URB_IS_SLEEPING 0x0020 /* custom flags */ usb_frcount_t start_frame; /* (modify) start frame (ISO) */ usb_frcount_t number_of_packets; /* (in) number of ISO packets */ uint16_t interval; /* (modify) transfer interval * (INT/ISO) */ uint16_t error_count; /* (return) number of ISO errors */ int16_t status; /* (return) status */ uint8_t setup_dma; /* (in) not used on FreeBSD */ uint8_t transfer_dma; /* (in) not used on FreeBSD */ uint8_t bsd_isread; uint8_t kill_count; /* FreeBSD specific */ struct usb_iso_packet_descriptor iso_frame_desc[]; /* (in) ISO ONLY */ }; /* various prototypes */ int usb_submit_urb(struct urb *urb, uint16_t mem_flags); int usb_unlink_urb(struct urb *urb); int usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe); int usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *ep, uint8_t request, uint8_t requesttype, uint16_t value, uint16_t index, void *data, uint16_t size, usb_timeout_t timeout); int usb_set_interface(struct usb_device *dev, uint8_t ifnum, uint8_t alternate); int usb_setup_endpoint(struct usb_device *dev, struct usb_host_endpoint *uhe, usb_frlength_t bufsize); struct usb_host_endpoint *usb_find_host_endpoint(struct usb_device *dev, uint8_t type, uint8_t ep); struct urb *usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags); struct usb_host_interface *usb_altnum_to_altsetting( const struct usb_interface *intf, uint8_t alt_index); struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no); void *usb_buffer_alloc(struct usb_device *dev, usb_size_t size, uint16_t mem_flags, uint8_t *dma_addr); void *usbd_get_intfdata(struct usb_interface *intf); void usb_buffer_free(struct usb_device *dev, usb_size_t size, void *addr, uint8_t dma_addr); void usb_free_urb(struct urb *urb); void usb_init_urb(struct urb *urb); void usb_kill_urb(struct urb *urb); void usb_set_intfdata(struct usb_interface *intf, void *data); void usb_linux_register(void *arg); void usb_linux_deregister(void *arg); void usb_fill_bulk_urb(struct urb *, struct usb_device *, struct usb_host_endpoint *, void *, int, usb_complete_t, void *); int usb_bulk_msg(struct usb_device *, struct usb_host_endpoint *, void *, int, uint16_t *, usb_timeout_t); #define interface_to_usbdev(intf) (intf)->linux_udev #define interface_to_bsddev(intf) (intf)->linux_udev #endif /* _USB_COMPAT_LINUX_H */ Index: head/sys/compat/linuxkpi/common/include/linux/workqueue.h =================================================================== --- 
head/sys/compat/linuxkpi/common/include/linux/workqueue.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/linux/workqueue.h (revision 331433) @@ -1,232 +1,232 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_WORKQUEUE_H_ #define _LINUX_WORKQUEUE_H_ #include #include #include #include #include #include #include #include #include #define WORK_CPU_UNBOUND MAXCPU #define WQ_UNBOUND (1 << 0) #define WQ_HIGHPRI (1 << 1) struct work_struct; typedef void (*work_func_t)(struct work_struct *); struct work_exec { TAILQ_ENTRY(work_exec) entry; struct work_struct *target; }; struct workqueue_struct { struct taskqueue *taskqueue; struct mtx exec_mtx; TAILQ_HEAD(, work_exec) exec_head; atomic_t draining; }; #define WQ_EXEC_LOCK(wq) mtx_lock(&(wq)->exec_mtx) #define WQ_EXEC_UNLOCK(wq) mtx_unlock(&(wq)->exec_mtx) struct work_struct { struct task work_task; struct workqueue_struct *work_queue; work_func_t func; atomic_t state; }; #define DECLARE_WORK(name, fn) \ struct work_struct name; \ static void name##_init(void *arg) \ { \ INIT_WORK(&name, fn); \ } \ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL) struct delayed_work { struct work_struct work; struct { struct callout callout; struct mtx mtx; int expires; } timer; }; #define DECLARE_DELAYED_WORK(name, fn) \ struct delayed_work name; \ static void name##_init(void *arg) \ { \ linux_init_delayed_work(&name, fn); \ } \ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL) static inline struct delayed_work * to_delayed_work(struct work_struct *work) { return (container_of(work, struct delayed_work, work)); } -#define INIT_WORK(work, fn) \ +#define INIT_WORK(work, fn) \ do { \ (work)->func = (fn); \ (work)->work_queue = NULL; \ atomic_set(&(work)->state, 0); \ TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work)); \ } while (0) #define INIT_WORK_ONSTACK(work, fn) \ INIT_WORK(work, fn) #define INIT_DELAYED_WORK(dwork, fn) \ linux_init_delayed_work(dwork, fn) #define INIT_DEFERRABLE_WORK(dwork, fn) \ INIT_DELAYED_WORK(dwork, fn) #define flush_scheduled_work() \ taskqueue_drain_all(system_wq->taskqueue) #define queue_work(wq, work) 
\ linux_queue_work_on(WORK_CPU_UNBOUND, wq, work) #define schedule_work(work) \ linux_queue_work_on(WORK_CPU_UNBOUND, system_wq, work) #define queue_delayed_work(wq, dwork, delay) \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay) #define schedule_delayed_work_on(cpu, dwork, delay) \ linux_queue_delayed_work_on(cpu, system_wq, dwork, delay) #define queue_work_on(cpu, wq, work) \ linux_queue_work_on(cpu, wq, work) #define schedule_delayed_work(dwork, delay) \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork, delay) #define queue_delayed_work_on(cpu, wq, dwork, delay) \ linux_queue_delayed_work_on(cpu, wq, dwork, delay) #define create_singlethread_workqueue(name) \ linux_create_workqueue_common(name, 1) #define create_workqueue(name) \ linux_create_workqueue_common(name, mp_ncpus) #define alloc_ordered_workqueue(name, flags) \ linux_create_workqueue_common(name, 1) #define alloc_workqueue(name, flags, max_active) \ linux_create_workqueue_common(name, max_active) #define flush_workqueue(wq) \ taskqueue_drain_all((wq)->taskqueue) #define drain_workqueue(wq) do { \ atomic_inc(&(wq)->draining); \ taskqueue_drain_all((wq)->taskqueue); \ atomic_dec(&(wq)->draining); \ } while (0) #define mod_delayed_work(wq, dwork, delay) ({ \ bool __retval; \ __retval = linux_cancel_delayed_work(dwork); \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, \ wq, dwork, delay); \ __retval; \ }) #define delayed_work_pending(dwork) \ linux_work_pending(&(dwork)->work) #define cancel_delayed_work(dwork) \ linux_cancel_delayed_work(dwork) #define cancel_work_sync(work) \ linux_cancel_work_sync(work) #define cancel_delayed_work_sync(dwork) \ linux_cancel_delayed_work_sync(dwork) #define flush_work(work) \ linux_flush_work(work) #define flush_delayed_work(dwork) \ linux_flush_delayed_work(dwork) #define work_pending(work) \ linux_work_pending(work) #define work_busy(work) \ linux_work_busy(work) #define destroy_work_on_stack(work) \ do { } while (0) #define destroy_delayed_work_on_stack(dwork) \ do { } while (0) #define destroy_workqueue(wq) \ linux_destroy_workqueue(wq) /* prototypes */ extern struct workqueue_struct *system_wq; extern struct workqueue_struct *system_long_wq; extern struct workqueue_struct *system_unbound_wq; extern struct workqueue_struct *system_power_efficient_wq; extern void linux_init_delayed_work(struct delayed_work *, work_func_t); extern void linux_work_fn(void *, int); extern void linux_delayed_work_fn(void *, int); extern struct workqueue_struct *linux_create_workqueue_common(const char *, int); extern void linux_destroy_workqueue(struct workqueue_struct *); extern bool linux_queue_work_on(int cpu, struct workqueue_struct *, struct work_struct *); extern bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *, struct delayed_work *, unsigned delay); extern bool linux_cancel_delayed_work(struct delayed_work *); extern bool linux_cancel_work_sync(struct work_struct *); extern bool linux_cancel_delayed_work_sync(struct delayed_work *); extern bool linux_flush_work(struct work_struct *); extern bool linux_flush_delayed_work(struct delayed_work *); extern bool linux_work_pending(struct work_struct *); extern bool linux_work_busy(struct work_struct *); #endif /* _LINUX_WORKQUEUE_H_ */ Index: head/sys/compat/linuxkpi/common/include/net/if_inet6.h =================================================================== --- head/sys/compat/linuxkpi/common/include/net/if_inet6.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/net/if_inet6.h (revision 331433) 
@@ -1,53 +1,53 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NET_IF_INET6_H_ #define _NET_IF_INET6_H_ #include #include #include static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) { /* * +-------+-------+-------+-------+-------+-------+ * | 33 | 33 | DST13 | DST14 | DST15 | DST16 | * +-------+-------+-------+-------+-------+-------+ */ - buf[0]= 0x33; - buf[1]= 0x33; + buf[0]= 0x33; + buf[1]= 0x33; - memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32)); + memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32)); } #endif /* _NET_IF_INET6_H_ */ Index: head/sys/compat/linuxkpi/common/include/net/ipv6.h =================================================================== --- head/sys/compat/linuxkpi/common/include/net/ipv6.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/net/ipv6.h (revision 331433) @@ -1,115 +1,114 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_NET_IPV6_H_ #define _LINUX_NET_IPV6_H_ #include #include #include #define IPV6_DEFAULT_HOPLIMIT 64 #define ipv6_addr_loopback(addr) IN6_IS_ADDR_LOOPBACK(addr) #define ipv6_addr_any(addr) IN6_IS_ADDR_UNSPECIFIED(addr) #define ipv6_addr_copy(dst, src) \ memcpy((dst), (src), sizeof(struct in6_addr)) static inline void ipv6_ib_mc_map(const struct in6_addr *addr, const unsigned char *broadcast, char *buf) { unsigned char scope; scope = broadcast[5] & 0xF; buf[0] = 0; buf[1] = 0xff; buf[2] = 0xff; buf[3] = 0xff; buf[4] = 0xff; buf[5] = 0x10 | scope; buf[6] = 0x60; buf[7] = 0x1b; buf[8] = broadcast[8]; buf[9] = broadcast[9]; memcpy(&buf[10], &addr->s6_addr[6], 10); } -static inline void __ipv6_addr_set_half(__be32 *addr, - __be32 wh, __be32 wl) +static inline void __ipv6_addr_set_half(__be32 *addr, __be32 wh, __be32 wl) { #if BITS_PER_LONG == 64 #if defined(__BIG_ENDIAN) - if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) { - *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl)); - return; - } + if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) { + *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl)); + return; + } #elif defined(__LITTLE_ENDIAN) - if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) { - *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh)); - return; - } + if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) { + *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh)); + return; + } #endif #endif - addr[0] = wh; - addr[1] = wl; + addr[0] = wh; + addr[1] = wl; } static inline void ipv6_addr_set(struct in6_addr *addr, - __be32 w1, __be32 w2, - __be32 w3, __be32 w4) + __be32 w1, __be32 w2, + __be32 w3, __be32 w4) { - __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2); - __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4); + __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2); + __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4); } static inline void ipv6_addr_set_v4mapped(const __be32 addr, struct in6_addr *v4mapped) { ipv6_addr_set(v4mapped, 0, 0, htonl(0x0000FFFF), addr); } static inline int ipv6_addr_v4mapped(const struct in6_addr *a) { return ((a->s6_addr32[0] | a->s6_addr32[1] | (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0); } static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2) { - return memcmp(a1, a2, sizeof(struct in6_addr)); + return memcmp(a1, a2, sizeof(struct in6_addr)); } #endif /* _LINUX_NET_IPV6_H_ */ Index: head/sys/compat/linuxkpi/common/include/net/netevent.h =================================================================== --- head/sys/compat/linuxkpi/common/include/net/netevent.h (revision 331432) +++ head/sys/compat/linuxkpi/common/include/net/netevent.h (revision 331433) @@ -1,75 +1,75 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_NET_NETEVENT_H_ #define _LINUX_NET_NETEVENT_H_ #include #include #include enum netevent_notif_type { NETEVENT_NEIGH_UPDATE = 0, #if 0 /* Unsupported events. */ - NETEVENT_PMTU_UPDATE, - NETEVENT_REDIRECT, + NETEVENT_PMTU_UPDATE, + NETEVENT_REDIRECT, #endif }; struct llentry; static inline void _handle_arp_update_event(void *arg, struct llentry *lle, int evt __unused) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETEVENT_NEIGH_UPDATE, lle); } static inline int register_netevent_notifier(struct notifier_block *nb) { nb->tags[NETEVENT_NEIGH_UPDATE] = EVENTHANDLER_REGISTER( lle_event, _handle_arp_update_event, nb, 0); return (0); } static inline int unregister_netevent_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(lle_event, nb->tags[NETEVENT_NEIGH_UPDATE]); return (0); } #endif /* _LINUX_NET_NETEVENT_H_ */ Index: head/sys/compat/linuxkpi/common/src/linux_compat.c =================================================================== --- head/sys/compat/linuxkpi/common/src/linux_compat.c (revision 331432) +++ head/sys/compat/linuxkpi/common/src/linux_compat.c (revision 331433) @@ -1,2266 +1,2266 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters"); MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); #include /* Undo Linux compat changes. */ #undef RB_ROOT #undef file #undef cdev #define RB_ROOT(head) (head)->rbh_root static struct vm_area_struct *linux_cdev_handle_find(void *handle); struct kobject linux_class_root; struct device linux_root_device; struct class linux_class_misc; struct list_head pci_drivers; struct list_head pci_devices; spinlock_t pci_lock; unsigned long linux_timer_hz_mask; int panic_cmp(struct rb_node *one, struct rb_node *two) { panic("no cmp"); } RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) { va_list tmp_va; int len; char *old; char *name; char dummy; old = kobj->name; if (old && fmt == NULL) return (0); /* compute length of string */ va_copy(tmp_va, args); len = vsnprintf(&dummy, 0, fmt, tmp_va); va_end(tmp_va); /* account for zero termination */ len++; /* check for error */ if (len < 1) return (-EINVAL); /* allocate memory for string */ name = kzalloc(len, GFP_KERNEL); if (name == NULL) return (-ENOMEM); vsnprintf(name, len, fmt, args); kobj->name = name; /* free old string */ kfree(old); /* filter new string */ for (; *name != '\0'; name++) if (*name == '/') *name = '!'; return (0); } int kobject_set_name(struct kobject *kobj, const char *fmt, ...) { va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); return (error); } static int kobject_add_complete(struct kobject *kobj, struct kobject *parent) { const struct kobj_type *t; int error; kobj->parent = parent; error = sysfs_create_dir(kobj); if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { struct attribute **attr; t = kobj->ktype; for (attr = t->default_attrs; *attr != NULL; attr++) { error = sysfs_create_file(kobj, *attr); if (error) break; } if (error) sysfs_remove_dir(kobj); - + } return (error); } int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
{ va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } void linux_kobject_release(struct kref *kref) { struct kobject *kobj; char *name; kobj = container_of(kref, struct kobject, kref); sysfs_remove_dir(kobj); name = kobj->name; if (kobj->ktype && kobj->ktype->release) kobj->ktype->release(kobj); kfree(name); } static void linux_kobject_kfree(struct kobject *kobj) { kfree(kobj); } static void linux_kobject_kfree_name(struct kobject *kobj) { if (kobj) { kfree(kobj->name); } } const struct kobj_type linux_kfree_type = { .release = linux_kobject_kfree }; static void linux_device_release(struct device *dev) { pr_debug("linux_device_release: %s\n", dev_name(dev)); kfree(dev); } static ssize_t linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct class, kobj), dattr, buf); return (error); } static ssize_t linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct class, kobj), dattr, buf, count); return (error); } static void linux_class_release(struct kobject *kobj) { struct class *class; class = container_of(kobj, struct class, kobj); if (class->class_release) class->class_release(class); } static const struct sysfs_ops linux_class_sysfs = { .show = linux_class_show, .store = linux_class_store, }; const struct kobj_type linux_class_ktype = { .release = linux_class_release, .sysfs_ops = &linux_class_sysfs }; static void linux_dev_release(struct kobject *kobj) { struct device *dev; dev = container_of(kobj, struct device, kobj); /* This is the precedence defined by linux. */ if (dev->release) dev->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); } static ssize_t linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct device, kobj), dattr, buf); return (error); } static ssize_t linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct device, kobj), dattr, buf, count); return (error); } static const struct sysfs_ops linux_dev_sysfs = { .show = linux_dev_show, .store = linux_dev_store, }; const struct kobj_type linux_dev_ktype = { .release = linux_dev_release, .sysfs_ops = &linux_dev_sysfs }; struct device * device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) 
{ struct device *dev; va_list args; dev = kzalloc(sizeof(*dev), M_WAITOK); dev->parent = parent; dev->class = class; dev->devt = devt; dev->driver_data = drvdata; dev->release = linux_device_release; va_start(args, fmt); kobject_set_name_vargs(&dev->kobj, fmt, args); va_end(args); device_register(dev); return (dev); } int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...) { va_list args; int error; kobject_init(kobj, ktype); kobj->ktype = ktype; kobj->parent = parent; kobj->name = NULL; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } static void linux_kq_lock(void *arg) { spinlock_t *s = arg; spin_lock(s); } static void linux_kq_unlock(void *arg) { spinlock_t *s = arg; spin_unlock(s); } static void linux_kq_lock_owned(void *arg) { #ifdef INVARIANTS spinlock_t *s = arg; mtx_assert(&s->m, MA_OWNED); #endif } static void linux_kq_lock_unowned(void *arg) { #ifdef INVARIANTS spinlock_t *s = arg; mtx_assert(&s->m, MA_NOTOWNED); #endif } static void linux_file_kqfilter_poll(struct linux_file *, int); struct linux_file * linux_file_alloc(void) { struct linux_file *filp; filp = kzalloc(sizeof(*filp), GFP_KERNEL); /* set initial refcount */ filp->f_count = 1; /* setup fields needed by kqueue support */ spin_lock_init(&filp->f_kqlock); knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, linux_kq_lock, linux_kq_unlock, linux_kq_lock_owned, linux_kq_lock_unowned); return (filp); } void linux_file_free(struct linux_file *filp) { if (filp->_file == NULL) { if (filp->f_shmem != NULL) vm_object_deallocate(filp->f_shmem); kfree(filp); } else { /* * The close method of the character device or file * will free the linux_file structure: */ _fdrop(filp->_file, curthread); } } static int linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; vm_page_t page; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake * page, update it with the new physical * address. */ page = *mres; vm_page_updatefake(page, paddr, vm_obj->memattr); } else { /* * Replace the passed in "mres" page with our * own fake page and free up the all of the * original pages. */ VM_OBJECT_WUNLOCK(vm_obj); page = vm_page_getfake(paddr, vm_obj->memattr); VM_OBJECT_WLOCK(vm_obj); vm_page_replace_checked(page, vm_obj, (*mres)->pindex, *mres); vm_page_lock(*mres); vm_page_free(*mres); vm_page_unlock(*mres); *mres = page; } page->valid = VM_PAGE_BITS_ALL; return (VM_PAGER_OK); } return (VM_PAGER_FAIL); } static int linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) { struct vm_area_struct *vmap; int err; linux_set_current(curthread); /* get VM area structure */ vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); VM_OBJECT_WUNLOCK(vm_obj); down_write(&vmap->vm_mm->mmap_sem); if (unlikely(vmap->vm_ops == NULL)) { err = VM_FAULT_SIGBUS; } else { struct vm_fault vmf; /* fill out VM fault structure */ vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); vmf.flags = (fault_type & VM_PROT_WRITE) ? 
FAULT_FLAG_WRITE : 0; vmf.pgoff = 0; vmf.page = NULL; vmf.vma = vmap; vmap->vm_pfn_count = 0; vmap->vm_pfn_pcount = &vmap->vm_pfn_count; vmap->vm_obj = vm_obj; err = vmap->vm_ops->fault(vmap, &vmf); while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { kern_yield(PRI_USER); err = vmap->vm_ops->fault(vmap, &vmf); } } /* translate return code */ switch (err) { case VM_FAULT_OOM: err = VM_PAGER_AGAIN; break; case VM_FAULT_SIGBUS: err = VM_PAGER_BAD; break; case VM_FAULT_NOPAGE: /* * By contract the fault handler will return having * busied all the pages itself. If pidx is already * found in the object, it will simply xbusy the first * page and return with vm_pfn_count set to 1. */ *first = vmap->vm_pfn_first; *last = *first + vmap->vm_pfn_count - 1; err = VM_PAGER_OK; break; default: err = VM_PAGER_ERROR; break; } up_write(&vmap->vm_mm->mmap_sem); VM_OBJECT_WLOCK(vm_obj); return (err); } static struct rwlock linux_vma_lock; static TAILQ_HEAD(, vm_area_struct) linux_vma_head = TAILQ_HEAD_INITIALIZER(linux_vma_head); static void linux_cdev_handle_free(struct vm_area_struct *vmap) { /* Drop reference on vm_file */ if (vmap->vm_file != NULL) fput(vmap->vm_file); /* Drop reference on mm_struct */ mmput(vmap->vm_mm); kfree(vmap); } static void linux_cdev_handle_remove(struct vm_area_struct *vmap) { rw_wlock(&linux_vma_lock); TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); rw_wunlock(&linux_vma_lock); } static struct vm_area_struct * linux_cdev_handle_find(void *handle) { struct vm_area_struct *vmap; rw_rlock(&linux_vma_lock); TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { if (vmap->vm_private_data == handle) break; } rw_runlock(&linux_vma_lock); return (vmap); } static int linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { MPASS(linux_cdev_handle_find(handle) != NULL); *color = 0; return (0); } static void linux_cdev_pager_dtor(void *handle) { const struct vm_operations_struct *vm_ops; struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(handle); MPASS(vmap != NULL); /* * Remove handle before calling close operation to prevent * other threads from reusing the handle pointer. 
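 *
 * Editor's sketch for orientation (the "foo" names are hypothetical):
 * the vm_ops consulted here are installed by the driver's mmap method
 * via linux_file_mmap_single() further below, which insists on both an
 * open and a close callback, e.g.:
 *
 *	static const struct vm_operations_struct foo_vm_ops = {
 *		.open = foo_vm_open,	(required)
 *		.close = foo_vm_close,	(required; what this dtor calls)
 *		.fault = foo_vm_fault,	(optional; when NULL the plain
 *					 OBJT_DEVICE pager is used instead)
 *	};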
*/ linux_cdev_handle_remove(vmap); down_write(&vmap->vm_mm->mmap_sem); vm_ops = vmap->vm_ops; if (likely(vm_ops != NULL)) vm_ops->close(vmap); up_write(&vmap->vm_mm->mmap_sem); linux_cdev_handle_free(vmap); } static struct cdev_pager_ops linux_cdev_pager_ops[2] = { { /* OBJT_MGTDEVICE */ .cdev_pg_populate = linux_cdev_pager_populate, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, { /* OBJT_DEVICE */ .cdev_pg_fault = linux_cdev_pager_fault, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, }; #define OPW(fp,td,code) ({ \ struct file *__fpop; \ __typeof(code) __retval; \ \ __fpop = (td)->td_fpop; \ (td)->td_fpop = (fp); \ __retval = (code); \ (td)->td_fpop = __fpop; \ __retval; \ }) static int linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file) { struct linux_cdev *ldev; struct linux_file *filp; int error; ldev = dev->si_drv1; filp = linux_file_alloc(); filp->f_dentry = &filp->f_dentry_store; filp->f_op = ldev->ops; filp->f_mode = file->f_flag; filp->f_flags = file->f_flag; filp->f_vnode = file->f_vnode; filp->_file = file; linux_set_current(td); if (filp->f_op->open) { error = -filp->f_op->open(file->f_vnode, filp); if (error) { kfree(filp); return (error); } } /* hold on to the vnode - used for fstat() */ vhold(filp->f_vnode); /* release the file from devfs */ finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); return (ENXIO); } #define LINUX_IOCTL_MIN_PTR 0x10000UL #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) static inline int linux_remap_address(void **uaddr, size_t len) { uintptr_t uaddr_val = (uintptr_t)(*uaddr); if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && uaddr_val < LINUX_IOCTL_MAX_PTR)) { struct task_struct *pts = current; if (pts == NULL) { *uaddr = NULL; return (1); } /* compute data offset */ uaddr_val -= LINUX_IOCTL_MIN_PTR; /* check that length is within bounds */ if ((len > IOCPARM_MAX) || (uaddr_val + len) > pts->bsd_ioctl_len) { *uaddr = NULL; return (1); } /* re-add kernel buffer address */ uaddr_val += (uintptr_t)pts->bsd_ioctl_data; /* update address location */ *uaddr = (void *)uaddr_val; return (1); } return (0); } int linux_copyin(const void *uaddr, void *kaddr, size_t len) { if (linux_remap_address(__DECONST(void **, &uaddr), len)) { if (uaddr == NULL) return (-EFAULT); memcpy(kaddr, uaddr, len); return (0); } return (-copyin(uaddr, kaddr, len)); } int linux_copyout(const void *kaddr, void *uaddr, size_t len) { if (linux_remap_address(&uaddr, len)) { if (uaddr == NULL) return (-EFAULT); memcpy(uaddr, kaddr, len); return (0); } return (-copyout(kaddr, uaddr, len)); } size_t linux_clear_user(void *_uaddr, size_t _len) { uint8_t *uaddr = _uaddr; size_t len = _len; /* make sure uaddr is aligned before going into the fast loop */ while (((uintptr_t)uaddr & 7) != 0 && len > 7) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } /* zero 8 bytes at a time */ while (len > 7) { #ifdef __LP64__ if (suword64(uaddr, 0)) return (_len); #else if (suword32(uaddr, 0)) return (_len); if (suword32(uaddr + 4, 0)) return (_len); #endif uaddr += 8; len -= 8; } /* zero fill end, if any */ while (len > 0) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } return (0); } int linux_access_ok(int rw, const void *uaddr, size_t len) { uintptr_t saddr; uintptr_t eaddr; /* get start and end address */ saddr = (uintptr_t)uaddr; eaddr = (uintptr_t)uaddr + len; /* verify addresses are valid for userspace */ return ((saddr == eaddr) || (eaddr > saddr && eaddr <= 
VM_MAXUSER_ADDRESS)); } /* * This function should return either EINTR or ERESTART depending on * the signal type sent to this thread: */ static int linux_get_error(struct task_struct *task, int error) { /* check for signal type interrupt code */ if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { error = -linux_schedule_get_interrupt_value(task); if (error == 0) error = EINTR; } return (error); } static int linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, u_long cmd, caddr_t data, struct thread *td) { struct task_struct *task = current; unsigned size; int error; size = IOCPARM_LEN(cmd); /* refer to logic in sys_ioctl() */ if (size > 0) { /* * Setup hint for linux_copyin() and linux_copyout(). * * Background: Linux code expects a user-space address * while FreeBSD supplies a kernel-space address. */ task->bsd_ioctl_data = data; task->bsd_ioctl_len = size; data = (void *)LINUX_IOCTL_MIN_PTR; } else { /* fetch user-space pointer */ data = *(void **)data; } #if defined(__amd64__) if (td->td_proc->p_elf_machine == EM_386) { /* try the compat IOCTL handler first */ if (filp->f_op->compat_ioctl != NULL) error = -OPW(fp, td, filp->f_op->compat_ioctl(filp, cmd, (u_long)data)); else error = ENOTTY; /* fallback to the regular IOCTL handler, if any */ if (error == ENOTTY && filp->f_op->unlocked_ioctl != NULL) error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data)); } else #endif if (filp->f_op->unlocked_ioctl != NULL) error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data)); else error = ENOTTY; if (size > 0) { task->bsd_ioctl_data = NULL; task->bsd_ioctl_len = 0; } if (error == EWOULDBLOCK) { /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } else { error = linux_get_error(task, error); } return (error); } #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) /* * This function atomically updates the poll wakeup state and returns * the previous state at the time of update. 
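 *
 * Editor's sketch (not in the original): "pstate" is a transition
 * table indexed by the current state, so callers such as
 * linux_poll_wakeup_callback() below supply something like
 *
 *	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
 *		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
 *		...
 *	};
 *	old = linux_poll_wakeup_state(&filp->f_wait_queue.state, state);
 *
 * and the compare-and-exchange loop retries until the table entry for
 * the currently observed state has been installed atomically.
 */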
*/ static uint8_t linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) { int c, old; c = v->counter; while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) c = old; return (c); } static int linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ }; struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_QUEUED: linux_poll_wakeup(filp); return (1); default: return (0); } } void linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, }; /* check if we are called inside the select system call */ if (p == LINUX_POLL_TABLE_NORMAL) selrecord(curthread, &filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_INIT: /* NOTE: file handles can only belong to one wait-queue */ filp->f_wait_queue.wqh = wqh; filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; add_wait_queue(wqh, &filp->f_wait_queue.wq); atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); break; default: break; } } static void linux_poll_wait_dequeue(struct linux_file *filp) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, }; seldrain(&filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_NOT_READY: case LINUX_FWQ_STATE_QUEUED: case LINUX_FWQ_STATE_READY: remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); break; default: break; } } void linux_poll_wakeup(struct linux_file *filp) { /* this function should be NULL-safe */ if (filp == NULL) return; selwakeup(&filp->f_selinfo); spin_lock(&filp->f_kqlock); filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); spin_unlock(&filp->f_kqlock); } static void linux_file_kqfilter_detach(struct knote *kn) { struct linux_file *filp = kn->kn_hook; spin_lock(&filp->f_kqlock); knlist_remove(&filp->f_selinfo.si_note, kn, 1); spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter_read_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock.m, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); } static int linux_file_kqfilter_write_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock.m, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 
1 : 0); } static struct filterops linux_dev_kqfiltops_read = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_read_event, }; static struct filterops linux_dev_kqfiltops_write = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_write_event, }; static void linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) { int temp; if (filp->f_kqflags & kqflags) { struct thread *td = curthread; /* get the latest polling state */ temp = OPW(filp->_file, td, filp->f_op->poll(filp, NULL)); spin_lock(&filp->f_kqlock); /* clear kqflags */ filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE); /* update kqflags */ if (temp & (POLLIN | POLLOUT)) { if (temp & POLLIN) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; if (temp & POLLOUT) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); } spin_unlock(&filp->f_kqlock); } } static int linux_file_kqfilter(struct file *file, struct knote *kn) { struct linux_file *filp; struct thread *td; int error; td = curthread; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; if (filp->f_op->poll == NULL) return (EINVAL); spin_lock(&filp->f_kqlock); switch (kn->kn_filter) { case EVFILT_READ: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; kn->kn_fop = &linux_dev_kqfiltops_read; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; case EVFILT_WRITE: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; kn->kn_fop = &linux_dev_kqfiltops_write; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; default: error = EINVAL; break; } spin_unlock(&filp->f_kqlock); if (error == 0) { linux_set_current(td); /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } return (error); } static int linux_file_mmap_single(struct file *fp, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot, struct thread *td) { struct task_struct *task; struct vm_area_struct *vmap; struct mm_struct *mm; struct linux_file *filp; vm_memattr_t attr; int error; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; if (filp->f_op->mmap == NULL) return (EOPNOTSUPP); linux_set_current(td); /* * The same VM object might be shared by multiple processes * and the mm_struct is usually freed when a process exits. * * The atomic reference below makes sure the mm_struct is * available as long as the vmap is in the linux_vma_head. 
*/ task = current; mm = task->mm; if (atomic_inc_not_zero(&mm->mm_users) == 0) return (EINVAL); vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); vmap->vm_start = 0; vmap->vm_end = size; vmap->vm_pgoff = *offset / PAGE_SIZE; vmap->vm_pfn = 0; vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); vmap->vm_ops = NULL; vmap->vm_file = get_file(filp); vmap->vm_mm = mm; if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { error = linux_get_error(task, EINTR); } else { error = -OPW(fp, td, filp->f_op->mmap(filp, vmap)); error = linux_get_error(task, error); up_write(&vmap->vm_mm->mmap_sem); } if (error != 0) { linux_cdev_handle_free(vmap); return (error); } attr = pgprot2cachemode(vmap->vm_page_prot); if (vmap->vm_ops != NULL) { struct vm_area_struct *ptr; void *vm_private_data; bool vm_no_fault; if (vmap->vm_ops->open == NULL || vmap->vm_ops->close == NULL || vmap->vm_private_data == NULL) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); return (EINVAL); } vm_private_data = vmap->vm_private_data; rw_wlock(&linux_vma_lock); TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { if (ptr->vm_private_data == vm_private_data) break; } /* check if there is an existing VM area struct */ if (ptr != NULL) { /* check if the VM area structure is invalid */ if (ptr->vm_ops == NULL || ptr->vm_ops->open == NULL || ptr->vm_ops->close == NULL) { error = ESTALE; vm_no_fault = 1; } else { error = EEXIST; vm_no_fault = (ptr->vm_ops->fault == NULL); } } else { /* insert VM area structure into list */ TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); error = 0; vm_no_fault = (vmap->vm_ops->fault == NULL); } rw_wunlock(&linux_vma_lock); if (error != 0) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); /* check for stale VM area struct */ if (error != EEXIST) return (error); } /* check if there is no fault handler */ if (vm_no_fault) { *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, &linux_cdev_pager_ops[1], size, nprot, *offset, td->td_ucred); } else { *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, &linux_cdev_pager_ops[0], size, nprot, *offset, td->td_ucred); } /* check if allocating the VM object failed */ if (*object == NULL) { if (error == 0) { /* remove VM area struct from list */ linux_cdev_handle_remove(vmap); /* free allocated VM area struct */ linux_cdev_handle_free(vmap); } return (EINVAL); } } else { struct sglist *sg; sg = sglist_alloc(1, M_WAITOK); sglist_append_phys(sg, (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, nprot, 0, td->td_ucred); linux_cdev_handle_free(vmap); if (*object == NULL) { sglist_free(sg); return (EINVAL); } } if (attr != VM_MEMATTR_DEFAULT) { VM_OBJECT_WLOCK(*object); vm_object_set_memattr(*object, attr); VM_OBJECT_WUNLOCK(*object); } *offset = 0; return (0); } struct cdevsw linuxcdevsw = { .d_version = D_VERSION, .d_fdopen = linux_dev_fdopen, .d_name = "lkpidev", }; static int linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; ssize_t bytes; int error; error = 0; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); if (filp->f_op->read) { bytes = OPW(file, td, filp->f_op->read(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { 
uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); return (error); } static int linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; ssize_t bytes; int error; error = 0; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); if (filp->f_op->write) { bytes = OPW(file, td, filp->f_op->write(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); return (error); } static int linux_file_poll(struct file *file, int events, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; int revents; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; linux_set_current(td); if (filp->f_op->poll != NULL) revents = OPW(file, td, filp->f_op->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events; else revents = 0; return (revents); } static int linux_file_close(struct file *file, struct thread *td) { struct linux_file *filp; int error; filp = (struct linux_file *)file->f_data; KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp))); filp->f_flags = file->f_flag; linux_set_current(td); linux_poll_wait_dequeue(filp); error = -OPW(file, td, filp->f_op->release(filp->f_vnode, filp)); funsetown(&filp->f_sigio); if (filp->f_vnode != NULL) vdrop(filp->f_vnode); kfree(filp); return (error); } static int linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, struct thread *td) { struct linux_file *filp; int error; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; error = 0; linux_set_current(td); switch (cmd) { case FIONBIO: break; case FIOASYNC: if (filp->f_op->fasync == NULL) break; error = -OPW(fp, td, filp->f_op->fasync(0, filp, fp->f_flag & FASYNC)); break; case FIOSETOWN: error = fsetown(*(int *)data, &filp->f_sigio); if (error == 0) { if (filp->f_op->fasync == NULL) break; error = -OPW(fp, td, filp->f_op->fasync(0, filp, fp->f_flag & FASYNC)); } break; case FIOGETOWN: *(int *)data = fgetown(&filp->f_sigio); break; default: error = linux_file_ioctl_sub(fp, filp, cmd, data, td); break; } return (error); } static int linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, struct file *fp, vm_ooffset_t *foff, vm_object_t *objp) { /* * Character devices do not provide private mappings * of any kind: */ if ((*maxprotp & VM_PROT_WRITE) == 0 && (prot & VM_PROT_WRITE) != 0) return (EACCES); if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0) return (EINVAL); return (linux_file_mmap_single(fp, foff, objsize, objp, (int)prot, td)); } static int linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { struct linux_file *filp; struct 
mount *mp; struct vnode *vp; vm_object_t object; vm_prot_t maxprot; int error; filp = (struct linux_file *)fp->f_data; vp = filp->f_vnode; if (vp == NULL) return (EOPNOTSUPP); /* * Ensure that file and memory protections are * compatible. */ mp = vp->v_mount; if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { maxprot = VM_PROT_NONE; if ((prot & VM_PROT_EXECUTE) != 0) return (EACCES); } else maxprot = VM_PROT_EXECUTE; if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_READ; else if ((prot & VM_PROT_READ) != 0) return (EACCES); /* * If we are sharing potential changes via MAP_SHARED and we * are trying to get write permission although we opened it * without asking for it, bail out. * * Note that most character devices always share mappings. * * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE * requests rather than doing it here. */ if ((flags & MAP_SHARED) != 0) { if ((fp->f_flag & FWRITE) != 0) maxprot |= VM_PROT_WRITE; else if ((prot & VM_PROT_WRITE) != 0) return (EACCES); } maxprot &= cap_maxprot; error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, &foff, &object); if (error != 0) return (error); error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, foff, FALSE, td); if (error != 0) vm_object_deallocate(object); return (error); } static int linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; struct vnode *vp; int error; filp = (struct linux_file *)fp->f_data; if (filp->f_vnode == NULL) return (EOPNOTSUPP); vp = filp->f_vnode; vn_lock(vp, LK_SHARED | LK_RETRY); error = vn_stat(vp, sb, td->td_ucred, NOCRED, td); VOP_UNLOCK(vp, 0); return (error); } static int linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { return (0); } unsigned int linux_iminor(struct inode *inode) { struct linux_cdev *ldev; if (inode == NULL || inode->v_rdev == NULL || inode->v_rdev->si_devsw != &linuxcdevsw) return (-1U); ldev = inode->v_rdev->si_drv1; if (ldev == NULL) return (-1U); return (minor(ldev->dev)); } struct fileops linuxfileops = { .fo_read = linux_file_read, .fo_write = linux_file_write, .fo_truncate = invfo_truncate, .fo_kqfilter = linux_file_kqfilter, .fo_stat = linux_file_stat, .fo_fill_kinfo = linux_file_fill_kinfo, .fo_poll = linux_file_poll, .fo_close = linux_file_close, .fo_ioctl = linux_file_ioctl, .fo_mmap = linux_file_mmap, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_flags = DFLAG_PASSABLE, }; /* * Hash of vmmap addresses. This is infrequently accessed and does not * need to be particularly large. This is done because we must store the * caller's idea of the map size to properly unmap. 
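 *
 * For illustration, a minimal driver-side sketch (hypothetical; the
 * REG_BASE address and the 0x1000 length are made-up values, and this
 * assumes the usual Linux-side ioremap() macro wrapping the
 * _ioremap_attr() helper below):
 *
 *	void *regs;
 *
 *	regs = ioremap(REG_BASE, 0x1000);	(size 0x1000 is recorded here)
 *	if (regs != NULL) {
 *		...
 *		iounmap(regs);			(size is looked up by address)
 *	}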
*/ struct vmmap { LIST_ENTRY(vmmap) vm_next; void *vm_addr; unsigned long vm_size; }; struct vmmaphd { struct vmmap *lh_first; }; #define VMMAP_HASH_SIZE 64 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; static struct mtx vmmaplock; static void vmmap_add(void *addr, unsigned long size) { struct vmmap *vmmap; vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); mtx_lock(&vmmaplock); vmmap->vm_size = size; vmmap->vm_addr = addr; LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); mtx_unlock(&vmmaplock); } static struct vmmap * vmmap_remove(void *addr) { struct vmmap *vmmap; mtx_lock(&vmmaplock); LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) if (vmmap->vm_addr == addr) break; if (vmmap) LIST_REMOVE(vmmap, vm_next); mtx_unlock(&vmmaplock); return (vmmap); } #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) void * _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) { void *addr; addr = pmap_mapdev_attr(phys_addr, size, attr); if (addr == NULL) return (NULL); vmmap_add(addr, size); return (addr); } #endif void iounmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); #endif kfree(vmmap); } void * vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) { vm_offset_t off; size_t size; size = count * PAGE_SIZE; off = kva_alloc(size); if (off == 0) return (NULL); vmmap_add((void *)off, size); pmap_qenter(off, pages, count); return ((void *)off); } void vunmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); kva_free((vm_offset_t)addr, vmmap->vm_size); kfree(vmmap); } char * kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); p = kmalloc(len + 1, gfp); if (p != NULL) vsnprintf(p, len + 1, fmt, ap); return (p); } char * kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; char *p; va_start(ap, fmt); p = kvasprintf(gfp, fmt, ap); va_end(ap); return (p); } static void linux_timer_callback_wrapper(void *context) { struct timer_list *timer; linux_set_current(curthread); timer = context; timer->function(timer->data); } void mod_timer(struct timer_list *timer, int expires) { timer->expires = expires; callout_reset(&timer->callout, linux_timer_jiffies_until(expires), &linux_timer_callback_wrapper, timer); } void add_timer(struct timer_list *timer) { callout_reset(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer); } void add_timer_on(struct timer_list *timer, int cpu) { callout_reset_on(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer, cpu); } static void linux_timer_init(void *arg) { /* * Compute an internal HZ value which can divide 2**32 to * avoid timer rounding problems when the tick value wraps * around 2**32: */ linux_timer_hz_mask = 1; while (linux_timer_hz_mask < (unsigned long)hz) linux_timer_hz_mask *= 2; linux_timer_hz_mask--; } SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); void linux_complete_common(struct completion *c, int all) { int wakeup_swapper; sleepq_lock(c); if (all) { c->done = UINT_MAX; wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); } else { if (c->done != UINT_MAX) c->done++; wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); } sleepq_release(c); if (wakeup_swapper) kick_proc0(); } /* * Indefinite wait for done != 0 with or without signals. */ int linux_wait_for_common(struct completion *c, int flags) { struct task_struct *task; int error; if (SCHEDULER_STOPPED()) return (0); DROP_GIANT(); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; error = 0; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); if (flags & SLEEPQ_INTERRUPTIBLE) { error = -sleepq_wait_sig(c, 0); if (error != 0) { linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; goto intr; } } else sleepq_wait(c, 0); } if (c->done != UINT_MAX) c->done--; sleepq_release(c); intr: PICKUP_GIANT(); return (error); } /* * Time limited wait for done != 0 with or without signals. 
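 *
 * Consumer-side sketch (hypothetical; assumes the usual LinuxKPI
 * wait_for_completion_timeout() wrapper maps onto this helper and that
 * msecs_to_jiffies() is available, as on Linux):
 *
 *	struct completion done;
 *	int left;
 *
 *	init_completion(&done);
 *	...				(another thread calls complete(&done))
 *	left = wait_for_completion_timeout(&done, msecs_to_jiffies(100));
 *	if (left == 0)
 *		...			(timed out)
 *	else
 *		...			(completed; "left" jiffies remained)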
*/ int linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) { struct task_struct *task; int end = jiffies + timeout; int error; if (SCHEDULER_STOPPED()) return (0); DROP_GIANT(); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); sleepq_set_timeout(c, linux_timer_jiffies_until(end)); if (flags & SLEEPQ_INTERRUPTIBLE) error = -sleepq_timedwait_sig(c, 0); else error = -sleepq_timedwait(c, 0); if (error != 0) { /* check for timeout */ if (error == -EWOULDBLOCK) { error = 0; /* timeout */ } else { /* signal happened */ linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; } goto done; } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); /* return how many jiffies are left */ error = linux_timer_jiffies_until(end); done: PICKUP_GIANT(); return (error); } int linux_try_wait_for_completion(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); if (c->done != 0 && c->done != UINT_MAX) c->done--; sleepq_release(c); return (isdone); } int linux_completion_done(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); sleepq_release(c); return (isdone); } static void linux_cdev_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; if (cdev->cdev) destroy_dev(cdev->cdev); kfree(cdev); kobject_put(parent); } static void linux_cdev_static_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; if (cdev->cdev) destroy_dev(cdev->cdev); kobject_put(parent); } const struct kobj_type linux_cdev_ktype = { .release = linux_cdev_release, }; const struct kobj_type linux_cdev_static_ktype = { .release = linux_cdev_static_release, }; static void linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) { struct notifier_block *nb; nb = arg; if (linkstate == LINK_STATE_UP) nb->notifier_call(nb, NETDEV_UP, ifp); else nb->notifier_call(nb, NETDEV_DOWN, ifp); } static void linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_REGISTER, ifp); } static void linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); } static void linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); } static void linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); } int register_netdevice_notifier(struct notifier_block *nb) { nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( iflladdr_event, linux_handle_iflladdr_event, nb, 0); return (0); } int register_inetaddr_notifier(struct notifier_block *nb) { - nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( - ifaddr_event, linux_handle_ifaddr_event, nb, 0); - return 
(0); + nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( + ifaddr_event, linux_handle_ifaddr_event, nb, 0); + return (0); } int unregister_netdevice_notifier(struct notifier_block *nb) { - EVENTHANDLER_DEREGISTER(ifnet_link_event, + EVENTHANDLER_DEREGISTER(ifnet_link_event, nb->tags[NETDEV_UP]); - EVENTHANDLER_DEREGISTER(ifnet_arrival_event, + EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nb->tags[NETDEV_REGISTER]); - EVENTHANDLER_DEREGISTER(ifnet_departure_event, + EVENTHANDLER_DEREGISTER(ifnet_departure_event, nb->tags[NETDEV_UNREGISTER]); - EVENTHANDLER_DEREGISTER(iflladdr_event, + EVENTHANDLER_DEREGISTER(iflladdr_event, nb->tags[NETDEV_CHANGEADDR]); return (0); } int unregister_inetaddr_notifier(struct notifier_block *nb) { - EVENTHANDLER_DEREGISTER(ifaddr_event, - nb->tags[NETDEV_CHANGEIFADDR]); + EVENTHANDLER_DEREGISTER(ifaddr_event, + nb->tags[NETDEV_CHANGEIFADDR]); - return (0); + return (0); } struct list_sort_thunk { int (*cmp)(void *, struct list_head *, struct list_head *); void *priv; }; static inline int linux_le_cmp(void *priv, const void *d1, const void *d2) { struct list_head *le1, *le2; struct list_sort_thunk *thunk; thunk = priv; le1 = *(__DECONST(struct list_head **, d1)); le2 = *(__DECONST(struct list_head **, d2)); return ((thunk->cmp)(thunk->priv, le1, le2)); } void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)) { struct list_sort_thunk thunk; struct list_head **ar, *le; size_t count, i; count = 0; list_for_each(le, head) count++; ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); i = 0; list_for_each(le, head) ar[i++] = le; thunk.cmp = cmp; thunk.priv = priv; qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); INIT_LIST_HEAD(head); for (i = 0; i < count; i++) list_add_tail(ar[i], head); free(ar, M_KMALLOC); } void linux_irq_handler(void *ent) { struct irq_ent *irqe; linux_set_current(curthread); irqe = ent; irqe->handler(irqe->irq, irqe->arg); } #if defined(__i386__) || defined(__amd64__) int linux_wbinvd_on_all_cpus(void) { pmap_invalidate_cache(); return (0); } #endif int linux_on_each_cpu(void callback(void *), void *data) { smp_rendezvous(smp_no_rendezvous_barrier, callback, smp_no_rendezvous_barrier, data); return (0); } int linux_in_atomic(void) { return ((curthread->td_pflags & TDP_NOFAULTING) != 0); } struct linux_cdev * linux_find_cdev(const char *name, unsigned major, unsigned minor) { dev_t dev = MKDEV(major, minor); struct cdev *cdev; dev_lock(); LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { struct linux_cdev *ldev = cdev->si_drv1; if (ldev->dev == dev && strcmp(kobject_name(&ldev->kobj), name) == 0) { break; } } dev_unlock(); return (cdev != NULL ? 
cdev->si_drv1 : NULL); } int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev_init(cdev, fops); kobject_set_name(&cdev->kobj, name); ret = cdev_add(cdev, makedev(major, i), 1); if (ret != 0) break; } return (ret); } int __register_chrdev_p(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops, uid_t uid, gid_t gid, int mode) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev_init(cdev, fops); kobject_set_name(&cdev->kobj, name); ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); if (ret != 0) break; } return (ret); } void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name) { struct linux_cdev *cdevp; int i; for (i = baseminor; i < baseminor + count; i++) { cdevp = linux_find_cdev(name, major, i); if (cdevp != NULL) cdev_del(cdevp); } } #if defined(__i386__) || defined(__amd64__) bool linux_cpu_has_clflush; #endif static void linux_compat_init(void *arg) { struct sysctl_oid *rootoid; int i; #if defined(__i386__) || defined(__amd64__) linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); #endif rw_init(&linux_vma_lock, "lkpi-vma-lock"); rootoid = SYSCTL_ADD_ROOT_NODE(NULL, OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); kobject_init(&linux_class_root, &linux_class_ktype); kobject_set_name(&linux_class_root, "class"); linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); kobject_init(&linux_root_device.kobj, &linux_dev_ktype); kobject_set_name(&linux_root_device.kobj, "device"); linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, "device"); linux_root_device.bsddev = root_bus; linux_class_misc.name = "misc"; class_register(&linux_class_misc); INIT_LIST_HEAD(&pci_drivers); INIT_LIST_HEAD(&pci_devices); spin_lock_init(&pci_lock); mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); for (i = 0; i < VMMAP_HASH_SIZE; i++) LIST_INIT(&vmmaphead[i]); } SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); static void linux_compat_uninit(void *arg) { linux_kobject_kfree_name(&linux_class_root); linux_kobject_kfree_name(&linux_root_device.kobj); linux_kobject_kfree_name(&linux_class_misc.kobj); mtx_destroy(&vmmaplock); spin_lock_destroy(&pci_lock); rw_destroy(&linux_vma_lock); } SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); /* * NOTE: Linux frequently uses "unsigned long" for pointer to integer * conversion and vice versa, where in FreeBSD "uintptr_t" would be * used. Assert these types have the same size, else some parts of the * LinuxKPI may not work like expected: */ CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); Index: head/sys/compat/linuxkpi/common/src/linux_idr.c =================================================================== --- head/sys/compat/linuxkpi/common/src/linux_idr.c (revision 331432) +++ head/sys/compat/linuxkpi/common/src/linux_idr.c (revision 331433) @@ -1,788 +1,788 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. 
* All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) struct linux_idr_cache { spinlock_t lock; struct idr_layer *head; unsigned count; }; static DPCPU_DEFINE(struct linux_idr_cache, linux_idr_cache); /* * IDR Implementation. * * This is quick and dirty and not as re-entrant as the linux version * however it should be fairly fast. It is basically a radix tree with * a builtin bitmap for allocation. 
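 *
 * Consumer-side usage, for reference (sketch with error handling
 * trimmed; all of these functions are implemented below, and passing
 * end == 0 to idr_alloc() means "no upper bound"):
 *
 *	struct idr ids;
 *	int id;
 *
 *	idr_init(&ids);
 *	id = idr_alloc(&ids, ptr, 0, 0, GFP_KERNEL);	(id >= 0 on success)
 *	...
 *	ptr = idr_find(&ids, id);
 *	idr_remove(&ids, id);
 *	idr_destroy(&ids);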
*/ static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat"); static struct idr_layer * idr_preload_dequeue_locked(struct linux_idr_cache *lic) { struct idr_layer *retval; /* check if wrong thread is trying to dequeue */ if (mtx_owned(&lic->lock.m) == 0) return (NULL); retval = lic->head; if (likely(retval != NULL)) { lic->head = retval->ary[0]; lic->count--; retval->ary[0] = NULL; } return (retval); } static void idr_preload_init(void *arg) { int cpu; CPU_FOREACH(cpu) { struct linux_idr_cache *lic = DPCPU_ID_PTR(cpu, linux_idr_cache); spin_lock_init(&lic->lock); } } SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL); static void idr_preload_uninit(void *arg) { int cpu; CPU_FOREACH(cpu) { struct idr_layer *cacheval; struct linux_idr_cache *lic = DPCPU_ID_PTR(cpu, linux_idr_cache); while (1) { spin_lock(&lic->lock); cacheval = idr_preload_dequeue_locked(lic); spin_unlock(&lic->lock); if (cacheval == NULL) break; free(cacheval, M_IDR); } spin_lock_destroy(&lic->lock); } } SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL); void idr_preload(gfp_t gfp_mask) { struct linux_idr_cache *lic; struct idr_layer *cacheval; sched_pin(); lic = &DPCPU_GET(linux_idr_cache); /* fill up cache */ spin_lock(&lic->lock); while (lic->count < MAX_IDR_FREE) { spin_unlock(&lic->lock); cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask); spin_lock(&lic->lock); if (cacheval == NULL) break; cacheval->ary[0] = lic->head; lic->head = cacheval; lic->count++; } } void idr_preload_end(void) { struct linux_idr_cache *lic; lic = &DPCPU_GET(linux_idr_cache); spin_unlock(&lic->lock); sched_unpin(); } static inline int idr_max(struct idr *idr) { return (1 << (idr->layers * IDR_BITS)) - 1; } static inline int idr_pos(int id, int layer) { return (id >> (IDR_BITS * layer)) & IDR_MASK; } void idr_init(struct idr *idr) { bzero(idr, sizeof(*idr)); mtx_init(&idr->lock, "idr", NULL, MTX_DEF); } /* Only frees cached pages. */ void idr_destroy(struct idr *idr) { struct idr_layer *il, *iln; idr_remove_all(idr); mtx_lock(&idr->lock); for (il = idr->free; il != NULL; il = iln) { iln = il->ary[0]; free(il, M_IDR); } mtx_unlock(&idr->lock); mtx_destroy(&idr->lock); } static void idr_remove_layer(struct idr_layer *il, int layer) { int i; if (il == NULL) return; if (layer == 0) { free(il, M_IDR); return; } for (i = 0; i < IDR_SIZE; i++) if (il->ary[i]) idr_remove_layer(il->ary[i], layer - 1); } void idr_remove_all(struct idr *idr) { mtx_lock(&idr->lock); idr_remove_layer(idr->top, idr->layers - 1); idr->top = NULL; idr->layers = 0; mtx_unlock(&idr->lock); } static void idr_remove_locked(struct idr *idr, int id) { struct idr_layer *il; int layer; int idx; id &= MAX_ID_MASK; il = idr->top; layer = idr->layers - 1; if (il == NULL || id > idr_max(idr)) return; /* * Walk down the tree to this item setting bitmaps along the way * as we know at least one item will be free along this path. */ while (layer && il) { idx = idr_pos(id, layer); il->bitmap |= 1 << idx; il = il->ary[idx]; layer--; } idx = id & IDR_MASK; /* * At this point we've set free space bitmaps up the whole tree. * We could make this non-fatal and unwind but linux dumps a stack * and a warning so I don't think it's necessary. 
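 *
 * For illustration, the walk simply follows the base-2^IDR_BITS digits
 * of the ID from the top layer down to the leaf. If IDR_BITS were 5,
 * removing ID 70 from a two-layer tree would visit slot 2 at the top
 * layer and slot 6 in the leaf:
 *
 *	idr_pos(70, 1) == (70 >> 5) & 0x1f == 2
 *	idr_pos(70, 0) == 70 & 0x1f == 6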
*/ if (il == NULL || (il->bitmap & (1 << idx)) != 0) panic("idr_remove: Item %d not allocated (%p, %p)\n", id, idr, il); il->ary[idx] = NULL; il->bitmap |= 1 << idx; } void idr_remove(struct idr *idr, int id) { mtx_lock(&idr->lock); idr_remove_locked(idr, id); mtx_unlock(&idr->lock); } static inline struct idr_layer * idr_find_layer_locked(struct idr *idr, int id) { struct idr_layer *il; int layer; id &= MAX_ID_MASK; il = idr->top; layer = idr->layers - 1; if (il == NULL || id > idr_max(idr)) return (NULL); while (layer && il) { il = il->ary[idr_pos(id, layer)]; layer--; } return (il); } void * idr_replace(struct idr *idr, void *ptr, int id) { struct idr_layer *il; void *res; int idx; mtx_lock(&idr->lock); il = idr_find_layer_locked(idr, id); idx = id & IDR_MASK; /* Replace still returns an error if the item was not allocated. */ if (il == NULL || (il->bitmap & (1 << idx))) { res = ERR_PTR(-ENOENT); } else { res = il->ary[idx]; il->ary[idx] = ptr; } mtx_unlock(&idr->lock); return (res); } static inline void * idr_find_locked(struct idr *idr, int id) { struct idr_layer *il; void *res; mtx_assert(&idr->lock, MA_OWNED); il = idr_find_layer_locked(idr, id); if (il != NULL) res = il->ary[id & IDR_MASK]; else res = NULL; return (res); } void * idr_find(struct idr *idr, int id) { void *res; mtx_lock(&idr->lock); res = idr_find_locked(idr, id); mtx_unlock(&idr->lock); return (res); } void * idr_get_next(struct idr *idr, int *nextidp) { void *res = NULL; int id = *nextidp; mtx_lock(&idr->lock); for (; id <= idr_max(idr); id++) { res = idr_find_locked(idr, id); if (res == NULL) continue; *nextidp = id; break; } mtx_unlock(&idr->lock); return (res); } int idr_pre_get(struct idr *idr, gfp_t gfp_mask) { struct idr_layer *il, *iln; struct idr_layer *head; int need; mtx_lock(&idr->lock); for (;;) { need = idr->layers + 1; for (il = idr->free; il != NULL; il = il->ary[0]) need--; mtx_unlock(&idr->lock); if (need <= 0) break; for (head = NULL; need; need--) { iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask); if (iln == NULL) break; bitmap_fill(&iln->bitmap, IDR_SIZE); if (head != NULL) { il->ary[0] = iln; il = iln; } else head = il = iln; } if (head == NULL) return (0); mtx_lock(&idr->lock); il->ary[0] = idr->free; idr->free = head; } return (1); } static struct idr_layer * idr_free_list_get(struct idr *idp) { struct idr_layer *il; if ((il = idp->free) != NULL) { idp->free = il->ary[0]; il->ary[0] = NULL; } return (il); } static inline struct idr_layer * idr_get(struct idr *idp) { struct idr_layer *il; if ((il = idr_free_list_get(idp)) != NULL) { MPASS(il->bitmap != 0); } else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) { bitmap_fill(&il->bitmap, IDR_SIZE); } else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) { bitmap_fill(&il->bitmap, IDR_SIZE); } else { return (NULL); } return (il); } /* * Could be implemented as get_new_above(idr, ptr, 0, idp) but written * first for simplicity sake. */ static int idr_get_new_locked(struct idr *idr, void *ptr, int *idp) { struct idr_layer *stack[MAX_LEVEL]; struct idr_layer *il; int error; int layer; int idx; int id; mtx_assert(&idr->lock, MA_OWNED); error = -EAGAIN; /* * Expand the tree until there is free space. 
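 *
 * The capacity bound follows from idr_max() above: a tree of N layers
 * covers IDs 0 .. (1 << (N * IDR_BITS)) - 1. With IDR_BITS == 5, for
 * example, one layer covers 0..31, two layers 0..1023 and three layers
 * 0..32767; a fresh layer is stacked on top of the old root each time
 * the current root runs out of free slots.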
*/ if (idr->top == NULL || idr->top->bitmap == 0) { if (idr->layers == MAX_LEVEL + 1) { error = -ENOSPC; goto out; } il = idr_get(idr); if (il == NULL) goto out; il->ary[0] = idr->top; if (idr->top) il->bitmap &= ~1; idr->top = il; idr->layers++; } il = idr->top; id = 0; /* * Walk the tree following free bitmaps, record our path. */ for (layer = idr->layers - 1;; layer--) { stack[layer] = il; idx = ffsl(il->bitmap); if (idx == 0) panic("idr_get_new: Invalid leaf state (%p, %p)\n", idr, il); idx--; id |= idx << (layer * IDR_BITS); if (layer == 0) break; if (il->ary[idx] == NULL) { il->ary[idx] = idr_get(idr); if (il->ary[idx] == NULL) goto out; } il = il->ary[idx]; } /* * Allocate the leaf to the consumer. */ il->bitmap &= ~(1 << idx); il->ary[idx] = ptr; *idp = id; /* * Clear bitmaps potentially up to the root. */ while (il->bitmap == 0 && ++layer < idr->layers) { il = stack[layer]; il->bitmap &= ~(1 << idr_pos(id, layer)); } error = 0; out: #ifdef INVARIANTS if (error == 0 && idr_find_locked(idr, id) != ptr) { panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n", idr, id, ptr); } #endif return (error); } int idr_get_new(struct idr *idr, void *ptr, int *idp) { int retval; mtx_lock(&idr->lock); retval = idr_get_new_locked(idr, ptr, idp); mtx_unlock(&idr->lock); return (retval); } static int idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp) { struct idr_layer *stack[MAX_LEVEL]; struct idr_layer *il; int error; int layer; int idx, sidx; int id; mtx_assert(&idr->lock, MA_OWNED); error = -EAGAIN; /* * Compute the layers required to support starting_id and the mask * at the top layer. */ restart: idx = starting_id; layer = 0; while (idx & ~IDR_MASK) { layer++; idx >>= IDR_BITS; } if (layer == MAX_LEVEL + 1) { error = -ENOSPC; goto out; } /* * Expand the tree until there is free space at or beyond starting_id. */ while (idr->layers <= layer || idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) { if (idr->layers == MAX_LEVEL + 1) { error = -ENOSPC; goto out; } il = idr_get(idr); if (il == NULL) goto out; il->ary[0] = idr->top; if (idr->top && idr->top->bitmap == 0) il->bitmap &= ~1; idr->top = il; idr->layers++; } il = idr->top; id = 0; /* * Walk the tree following free bitmaps, record our path. */ for (layer = idr->layers - 1;; layer--) { stack[layer] = il; sidx = idr_pos(starting_id, layer); /* Returns index numbered from 0 or size if none exists. */ idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx); if (idx == IDR_SIZE && sidx == 0) panic("idr_get_new: Invalid leaf state (%p, %p)\n", idr, il); /* * We may have walked a path where there was a free bit but * it was lower than what we wanted. Restart the search with * a larger starting id. id contains the progress we made so * far. Search the leaf one above this level. This may * restart as many as MAX_LEVEL times but that is expected * to be rare. */ if (idx == IDR_SIZE) { starting_id = id + (1 << ((layer + 1) * IDR_BITS)); goto restart; } if (idx > sidx) starting_id = 0; /* Search the whole subtree. */ id |= idx << (layer * IDR_BITS); if (layer == 0) break; if (il->ary[idx] == NULL) { il->ary[idx] = idr_get(idr); if (il->ary[idx] == NULL) goto out; } il = il->ary[idx]; } /* * Allocate the leaf to the consumer. */ il->bitmap &= ~(1 << idx); il->ary[idx] = ptr; *idp = id; /* * Clear bitmaps potentially up to the root. 
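 *
 * The invariant here is that a set bit means "a free slot exists in
 * the subtree below". When the new allocation fills its leaf
 * (bitmap == 0), the matching bit is cleared in the parent, and the
 * clearing keeps walking toward the root for every ancestor whose
 * subtrees have all filled up.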
*/ while (il->bitmap == 0 && ++layer < idr->layers) { il = stack[layer]; il->bitmap &= ~(1 << idr_pos(id, layer)); } error = 0; out: #ifdef INVARIANTS if (error == 0 && idr_find_locked(idr, id) != ptr) { panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n", idr, id, ptr); } #endif return (error); } int idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp) { int retval; mtx_lock(&idr->lock); retval = idr_get_new_above_locked(idr, ptr, starting_id, idp); mtx_unlock(&idr->lock); return (retval); } int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) { return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id)); } static int idr_alloc_locked(struct idr *idr, void *ptr, int start, int end) { int max = end > 0 ? end - 1 : INT_MAX; int error; int id; mtx_assert(&idr->lock, MA_OWNED); if (unlikely(start < 0)) return (-EINVAL); if (unlikely(max < start)) return (-ENOSPC); if (start == 0) error = idr_get_new_locked(idr, ptr, &id); else error = idr_get_new_above_locked(idr, ptr, start, &id); if (unlikely(error < 0)) return (error); if (unlikely(id > max)) { idr_remove_locked(idr, id); return (-ENOSPC); } return (id); } int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) { int retval; mtx_lock(&idr->lock); retval = idr_alloc_locked(idr, ptr, start, end); mtx_unlock(&idr->lock); return (retval); } int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) { int retval; mtx_lock(&idr->lock); retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end); if (unlikely(retval == -ENOSPC)) retval = idr_alloc_locked(idr, ptr, start, end); if (likely(retval >= 0)) idr->next_cyclic_id = retval + 1; mtx_unlock(&idr->lock); return (retval); } static int idr_for_each_layer(struct idr_layer *il, int offset, int layer, int (*f)(int id, void *p, void *data), void *data) { int i, err; if (il == NULL) return (0); if (layer == 0) { for (i = 0; i < IDR_SIZE; i++) { if (il->ary[i] == NULL) continue; err = f(i + offset, il->ary[i], data); if (err) return (err); } return (0); } for (i = 0; i < IDR_SIZE; i++) { if (il->ary[i] == NULL) continue; err = idr_for_each_layer(il->ary[i], (i + offset) * IDR_SIZE, layer - 1, f, data); if (err) return (err); } return (0); } /* NOTE: It is not allowed to modify the IDR tree while it is being iterated */ int idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data) { return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data)); } int ida_pre_get(struct ida *ida, gfp_t flags) { if (idr_pre_get(&ida->idr, flags) == 0) return (0); if (ida->free_bitmap == NULL) { ida->free_bitmap = malloc(sizeof(struct ida_bitmap), M_IDR, flags); } return (ida->free_bitmap != NULL); } int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t flags) { int ret, id; unsigned int max; MPASS((int)start >= 0); MPASS((int)end >= 0); if (end == 0) max = 0x80000000; else { MPASS(end > start); max = end - 1; } again: if (!ida_pre_get(ida, flags)) return (-ENOMEM); if ((ret = ida_get_new_above(ida, start, &id)) == 0) { if (id > max) { ida_remove(ida, id); ret = -ENOSPC; } else { ret = id; } } if (__predict_false(ret == -EAGAIN)) goto again; return (ret); } void ida_simple_remove(struct ida *ida, unsigned int id) { idr_remove(&ida->idr, id); } void ida_remove(struct ida *ida, int id) -{ +{ idr_remove(&ida->idr, id); } void ida_init(struct ida *ida) { idr_init(&ida->idr); } void ida_destroy(struct ida *ida) { idr_destroy(&ida->idr); free(ida->free_bitmap, 
M_IDR); } Index: head/sys/compat/linuxkpi/common/src/linux_radix.c =================================================================== --- head/sys/compat/linuxkpi/common/src/linux_radix.c (revision 331432) +++ head/sys/compat/linuxkpi/common/src/linux_radix.c (revision 331433) @@ -1,257 +1,257 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_RADIX, "radix", "Linux radix compat"); static inline unsigned long radix_max(struct radix_tree_root *root) { return ((1UL << (root->height * RADIX_TREE_MAP_SHIFT)) - 1UL); } static inline int radix_pos(long id, int height) { return (id >> (RADIX_TREE_MAP_SHIFT * height)) & RADIX_TREE_MAP_MASK; } void * radix_tree_lookup(struct radix_tree_root *root, unsigned long index) { struct radix_tree_node *node; void *item; int height; item = NULL; node = root->rnode; height = root->height - 1; if (index > radix_max(root)) goto out; while (height && node) node = node->slots[radix_pos(index, height--)]; if (node) item = node->slots[radix_pos(index, 0)]; out: return (item); } bool radix_tree_iter_find(struct radix_tree_root *root, struct radix_tree_iter *iter, void ***pppslot) { struct radix_tree_node *node; unsigned long index = iter->index; int height; restart: node = root->rnode; if (node == NULL) return (false); height = root->height - 1; if (height == -1 || index > radix_max(root)) return (false); do { unsigned long mask = RADIX_TREE_MAP_MASK << (RADIX_TREE_MAP_SHIFT * height); unsigned long step = 1UL << (RADIX_TREE_MAP_SHIFT * height); int pos = radix_pos(index, height); struct radix_tree_node *next; /* track last slot */ *pppslot = node->slots + pos; next = node->slots[pos]; if (next == NULL) { index += step; index &= -step; if ((index & mask) == 0) goto restart; } else { node = next; height--; } } while (height != -1); iter->index = index; return (true); } void * radix_tree_delete(struct radix_tree_root *root, unsigned long index) { struct radix_tree_node *stack[RADIX_TREE_MAX_HEIGHT]; struct radix_tree_node *node; void *item; int height; int idx; item = NULL; 
node = root->rnode; height = root->height - 1; if (index > radix_max(root)) goto out; /* * Find the node and record the path in stack. */ while (height && node) { stack[height] = node; node = node->slots[radix_pos(index, height--)]; } idx = radix_pos(index, 0); if (node) item = node->slots[idx]; /* * If we removed something reduce the height of the tree. */ if (item) for (;;) { node->slots[idx] = NULL; node->count--; if (node->count > 0) break; free(node, M_RADIX); if (node == root->rnode) { root->rnode = NULL; root->height = 0; break; } height++; node = stack[height]; idx = radix_pos(index, height); } out: return (item); } int radix_tree_insert(struct radix_tree_root *root, unsigned long index, void *item) { struct radix_tree_node *node; struct radix_tree_node *temp[RADIX_TREE_MAX_HEIGHT - 1]; int height; int idx; /* bail out upon insertion of a NULL item */ if (item == NULL) return (-EINVAL); /* get root node, if any */ node = root->rnode; /* allocate root node, if any */ if (node == NULL) { node = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO); if (node == NULL) return (-ENOMEM); root->rnode = node; root->height++; } /* expand radix tree as needed */ while (radix_max(root) < index) { /* check if the radix tree is getting too big */ if (root->height == RADIX_TREE_MAX_HEIGHT) return (-E2BIG); /* * If the root radix level is not empty, we need to * allocate a new radix level: */ if (node->count != 0) { node = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO); if (node == NULL) return (-ENOMEM); node->slots[0] = root->rnode; node->count++; root->rnode = node; } root->height++; } /* get radix tree height index */ height = root->height - 1; /* walk down the tree until the first missing node, if any */ for ( ; height != 0; height--) { idx = radix_pos(index, height); if (node->slots[idx] == NULL) break; node = node->slots[idx]; } /* allocate the missing radix levels, if any */ for (idx = 0; idx != height; idx++) { temp[idx] = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO); if (temp[idx] == NULL) { while(idx--) free(temp[idx], M_RADIX); /* Check if we should free the root node as well. */ if (root->rnode->count == 0) { free(root->rnode, M_RADIX); root->rnode = NULL; root->height = 0; } return (-ENOMEM); } } /* setup new radix levels, if any */ for ( ; height != 0; height--) { idx = radix_pos(index, height); node->slots[idx] = temp[height - 1]; node->count++; node = node->slots[idx]; } /* * Insert and adjust count if the item does not already exist. */ idx = radix_pos(index, 0); if (node->slots[idx]) return (-EEXIST); node->slots[idx] = item; node->count++; - + return (0); } Index: head/sys/compat/linuxkpi/common/src/linux_usb.c =================================================================== --- head/sys/compat/linuxkpi/common/src/linux_usb.c (revision 331432) +++ head/sys/compat/linuxkpi/common/src/linux_usb.c (revision 331433) @@ -1,1746 +1,1746 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2007 Luigi Rizzo - Universita` di Pisa. All rights reserved. * Copyright (c) 2007 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR usb_debug #include #include #include #include #include #include #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ struct usb_linux_softc { LIST_ENTRY(usb_linux_softc) sc_attached_list; device_t sc_fbsd_dev; struct usb_device *sc_fbsd_udev; struct usb_interface *sc_ui; struct usb_driver *sc_udrv; }; /* prototypes */ static device_probe_t usb_linux_probe; static device_attach_t usb_linux_attach; static device_detach_t usb_linux_detach; static device_suspend_t usb_linux_suspend; static device_resume_t usb_linux_resume; static usb_callback_t usb_linux_isoc_callback; static usb_callback_t usb_linux_non_isoc_callback; static usb_complete_t usb_linux_wait_complete; static uint16_t usb_max_isoc_frames(struct usb_device *); static int usb_start_wait_urb(struct urb *, usb_timeout_t, uint16_t *); static const struct usb_device_id *usb_linux_lookup_id( const struct usb_device_id *, struct usb_attach_arg *); static struct usb_driver *usb_linux_get_usb_driver(struct usb_linux_softc *); static int usb_linux_create_usb_device(struct usb_device *, device_t); static void usb_linux_cleanup_interface(struct usb_device *, struct usb_interface *); static void usb_linux_complete(struct usb_xfer *); static int usb_unlink_urb_sub(struct urb *, uint8_t); /*------------------------------------------------------------------------* * FreeBSD USB interface *------------------------------------------------------------------------*/ static LIST_HEAD(, usb_linux_softc) usb_linux_attached_list; static LIST_HEAD(, usb_driver) usb_linux_driver_list; static device_method_t usb_linux_methods[] = { /* Device interface */ DEVMETHOD(device_probe, usb_linux_probe), DEVMETHOD(device_attach, usb_linux_attach), DEVMETHOD(device_detach, usb_linux_detach), DEVMETHOD(device_suspend, usb_linux_suspend), DEVMETHOD(device_resume, usb_linux_resume), DEVMETHOD_END }; static driver_t usb_linux_driver = { .name = "usb_linux", .methods = usb_linux_methods, .size = sizeof(struct usb_linux_softc), }; static devclass_t usb_linux_devclass; DRIVER_MODULE(usb_linux, uhub, usb_linux_driver, usb_linux_devclass, NULL, 0); MODULE_VERSION(usb_linux, 1); /*------------------------------------------------------------------------* * usb_linux_lookup_id * * This function takes an array of "struct
usb_device_id" and tries * to match the entries with the information in "struct usb_attach_arg". * If it finds a match the matching entry will be returned. * Else "NULL" will be returned. *------------------------------------------------------------------------*/ static const struct usb_device_id * usb_linux_lookup_id(const struct usb_device_id *id, struct usb_attach_arg *uaa) { if (id == NULL) { goto done; } /* * Keep on matching array entries until we find one with * "match_flags" equal to zero, which indicates the end of the * array: */ for (; id->match_flags; id++) { if ((id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) && (id->idVendor != uaa->info.idVendor)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_PRODUCT) && (id->idProduct != uaa->info.idProduct)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO) && (id->bcdDevice_lo > uaa->info.bcdDevice)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI) && (id->bcdDevice_hi < uaa->info.bcdDevice)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) && (id->bDeviceClass != uaa->info.bDeviceClass)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) && (id->bDeviceSubClass != uaa->info.bDeviceSubClass)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) && (id->bDeviceProtocol != uaa->info.bDeviceProtocol)) { continue; } if ((uaa->info.bDeviceClass == 0xFF) && !(id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) && (id->match_flags & (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS | USB_DEVICE_ID_MATCH_INT_PROTOCOL))) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_CLASS) && (id->bInterfaceClass != uaa->info.bInterfaceClass)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_SUBCLASS) && (id->bInterfaceSubClass != uaa->info.bInterfaceSubClass)) { continue; } if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_PROTOCOL) && (id->bInterfaceProtocol != uaa->info.bInterfaceProtocol)) { continue; } /* we found a match! */ return (id); } done: return (NULL); } /*------------------------------------------------------------------------* * usb_linux_probe * * This function is the FreeBSD probe callback. It is called from the * FreeBSD USB stack through the "device_probe_and_attach()" function. *------------------------------------------------------------------------*/ static int usb_linux_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct usb_driver *udrv; int err = ENXIO; if (uaa->usb_mode != USB_MODE_HOST) { return (ENXIO); } mtx_lock(&Giant); LIST_FOREACH(udrv, &usb_linux_driver_list, linux_driver_list) { if (usb_linux_lookup_id(udrv->id_table, uaa)) { err = 0; break; } } mtx_unlock(&Giant); return (err); } /*------------------------------------------------------------------------* * usb_linux_get_usb_driver * * This function returns the pointer to the "struct usb_driver" where * the Linux USB device driver "struct usb_device_id" match was found. * We apply a lock before reading out the pointer to avoid races. *------------------------------------------------------------------------*/ static struct usb_driver * usb_linux_get_usb_driver(struct usb_linux_softc *sc) { struct usb_driver *udrv; mtx_lock(&Giant); udrv = sc->sc_udrv; mtx_unlock(&Giant); return (udrv); } /*------------------------------------------------------------------------* * usb_linux_attach * * This function is the FreeBSD attach callback. It is called from the * FreeBSD USB stack through the "device_probe_and_attach()" function. 
* This function is called when "usb_linux_probe()" returns zero. *------------------------------------------------------------------------*/ static int usb_linux_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv; const struct usb_device_id *id = NULL; mtx_lock(&Giant); LIST_FOREACH(udrv, &usb_linux_driver_list, linux_driver_list) { id = usb_linux_lookup_id(udrv->id_table, uaa); if (id) break; } mtx_unlock(&Giant); if (id == NULL) { return (ENXIO); } if (usb_linux_create_usb_device(uaa->device, dev) != 0) return (ENOMEM); device_set_usb_desc(dev); sc->sc_fbsd_udev = uaa->device; sc->sc_fbsd_dev = dev; sc->sc_udrv = udrv; sc->sc_ui = usb_ifnum_to_if(uaa->device, uaa->info.bIfaceNum); if (sc->sc_ui == NULL) { return (EINVAL); } if (udrv->probe) { if ((udrv->probe) (sc->sc_ui, id)) { return (ENXIO); } } mtx_lock(&Giant); LIST_INSERT_HEAD(&usb_linux_attached_list, sc, sc_attached_list); mtx_unlock(&Giant); /* success */ return (0); } /*------------------------------------------------------------------------* * usb_linux_detach * * This function is the FreeBSD detach callback. It is called from the * FreeBSD USB stack through the "device_detach()" function. *------------------------------------------------------------------------*/ static int usb_linux_detach(device_t dev) { struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv = NULL; mtx_lock(&Giant); if (sc->sc_attached_list.le_prev) { LIST_REMOVE(sc, sc_attached_list); sc->sc_attached_list.le_prev = NULL; udrv = sc->sc_udrv; sc->sc_udrv = NULL; } mtx_unlock(&Giant); if (udrv && udrv->disconnect) { (udrv->disconnect) (sc->sc_ui); } /* * Make sure that we free all FreeBSD USB transfers belonging to * this Linux "usb_interface", hence they will most likely not be * needed any more. */ usb_linux_cleanup_interface(sc->sc_fbsd_udev, sc->sc_ui); return (0); } /*------------------------------------------------------------------------* * usb_linux_suspend * * This function is the FreeBSD suspend callback. Usually it does nothing. *------------------------------------------------------------------------*/ static int usb_linux_suspend(device_t dev) { struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv = usb_linux_get_usb_driver(sc); int err; if (udrv && udrv->suspend) { err = (udrv->suspend) (sc->sc_ui, 0); } return (0); } /*------------------------------------------------------------------------* * usb_linux_resume * * This function is the FreeBSD resume callback. Usually it does nothing. *------------------------------------------------------------------------*/ static int usb_linux_resume(device_t dev) { struct usb_linux_softc *sc = device_get_softc(dev); struct usb_driver *udrv = usb_linux_get_usb_driver(sc); int err; if (udrv && udrv->resume) { err = (udrv->resume) (sc->sc_ui); } return (0); } /*------------------------------------------------------------------------* * Linux emulation layer *------------------------------------------------------------------------*/ /*------------------------------------------------------------------------* * usb_max_isoc_frames * * The following function returns the maximum number of isochronous * frames that we support per URB. It is not part of the Linux USB API. 
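 *
 * (Background: full- and low-speed hosts schedule one isochronous
 * frame per millisecond, while high-speed hosts schedule eight
 * 125-microsecond microframes per millisecond, which is why the two
 * speed classes get different limits here.)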
*------------------------------------------------------------------------*/ static uint16_t usb_max_isoc_frames(struct usb_device *dev) { ; /* indent fix */ switch (usbd_get_speed(dev)) { case USB_SPEED_LOW: case USB_SPEED_FULL: return (USB_MAX_FULL_SPEED_ISOC_FRAMES); default: return (USB_MAX_HIGH_SPEED_ISOC_FRAMES); } } /*------------------------------------------------------------------------* * usb_submit_urb * * This function is used to queue an URB after it has been * initialized. If it returns non-zero, it means that the URB was not * queued. *------------------------------------------------------------------------*/ int usb_submit_urb(struct urb *urb, uint16_t mem_flags) { struct usb_host_endpoint *uhe; uint8_t do_unlock; int err; if (urb == NULL) return (-EINVAL); do_unlock = mtx_owned(&Giant) ? 0 : 1; if (do_unlock) mtx_lock(&Giant); if (urb->endpoint == NULL) { err = -EINVAL; goto done; } /* - * Check to see if the urb is in the process of being killed - * and stop a urb that is in the process of being killed from - * being re-submitted (e.g. from its completion callback - * function). - */ + * Check to see if the urb is in the process of being killed + * and stop a urb that is in the process of being killed from + * being re-submitted (e.g. from its completion callback + * function). + */ if (urb->kill_count != 0) { err = -EPERM; goto done; } uhe = urb->endpoint; /* * Check that we have got a FreeBSD USB transfer that will dequeue * the URB structure and do the real transfer. If there are no USB * transfers, then we return an error. */ if (uhe->bsd_xfer[0] || uhe->bsd_xfer[1]) { /* we are ready! */ TAILQ_INSERT_TAIL(&uhe->bsd_urb_list, urb, bsd_urb_list); urb->status = -EINPROGRESS; usbd_transfer_start(uhe->bsd_xfer[0]); usbd_transfer_start(uhe->bsd_xfer[1]); err = 0; } else { /* no pipes have been setup yet! */ urb->status = -EINVAL; err = -EINVAL; } done: if (do_unlock) mtx_unlock(&Giant); return (err); } /*------------------------------------------------------------------------* * usb_unlink_urb * * This function is used to stop an URB after it has been * submitted, but before the "complete" callback has been called. *------------------------------------------------------------------------*/ int usb_unlink_urb(struct urb *urb) { return (usb_unlink_urb_sub(urb, 0)); } static void usb_unlink_bsd(struct usb_xfer *xfer, struct urb *urb, uint8_t drain) { if (xfer == NULL) return; if (!usbd_transfer_pending(xfer)) return; if (xfer->priv_fifo == (void *)urb) { if (drain) { mtx_unlock(&Giant); usbd_transfer_drain(xfer); mtx_lock(&Giant); } else { usbd_transfer_stop(xfer); } usbd_transfer_start(xfer); } } static int usb_unlink_urb_sub(struct urb *urb, uint8_t drain) { struct usb_host_endpoint *uhe; uint16_t x; uint8_t do_unlock; int err; if (urb == NULL) return (-EINVAL); do_unlock = mtx_owned(&Giant) ? 0 : 1; if (do_unlock) mtx_lock(&Giant); if (drain) urb->kill_count++; if (urb->endpoint == NULL) { err = -EINVAL; goto done; } uhe = urb->endpoint; if (urb->bsd_urb_list.tqe_prev) { /* not started yet, just remove it from the queue */ TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list); urb->bsd_urb_list.tqe_prev = NULL; urb->status = -ECONNRESET; urb->actual_length = 0; for (x = 0; x < urb->number_of_packets; x++) { urb->iso_frame_desc[x].actual_length = 0; } if (urb->complete) { (urb->complete) (urb); } } else { /* * If the URB is not on the URB list, then check if one of * the FreeBSD USB transfers is processing the current URB.
* If so, re-start that transfer, which will lead to the * termination of that URB: */ usb_unlink_bsd(uhe->bsd_xfer[0], urb, drain); usb_unlink_bsd(uhe->bsd_xfer[1], urb, drain); } err = 0; done: if (drain) urb->kill_count--; if (do_unlock) mtx_unlock(&Giant); return (err); } /*------------------------------------------------------------------------* * usb_clear_halt * * This function must always be used to clear the stall. A stall occurs when * a USB endpoint returns a stall message to the USB host controller. * Until the stall is cleared, no data can be transferred. *------------------------------------------------------------------------*/ int usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe) { struct usb_config cfg[1]; struct usb_endpoint *ep; uint8_t type; uint8_t addr; if (uhe == NULL) return (-EINVAL); type = uhe->desc.bmAttributes & UE_XFERTYPE; addr = uhe->desc.bEndpointAddress; memset(cfg, 0, sizeof(cfg)); cfg[0].type = type; cfg[0].endpoint = addr & UE_ADDR; cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN); ep = usbd_get_endpoint(dev, uhe->bsd_iface_index, cfg); if (ep == NULL) return (-EINVAL); usbd_clear_data_toggle(dev, ep); return (usb_control_msg(dev, &dev->ep0, UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT, UF_ENDPOINT_HALT, addr, NULL, 0, 1000)); } /*------------------------------------------------------------------------* * usb_start_wait_urb * * This is an internal function that is used to perform synchronous * Linux USB transfers. *------------------------------------------------------------------------*/ static int usb_start_wait_urb(struct urb *urb, usb_timeout_t timeout, uint16_t *p_actlen) { int err; uint8_t do_unlock; /* you must have a timeout! */ if (timeout == 0) { timeout = 1; } urb->complete = &usb_linux_wait_complete; urb->timeout = timeout; urb->transfer_flags |= URB_WAIT_WAKEUP; urb->transfer_flags &= ~URB_IS_SLEEPING; do_unlock = mtx_owned(&Giant) ? 0 : 1; if (do_unlock) mtx_lock(&Giant); err = usb_submit_urb(urb, 0); if (err) goto done; /* * the URB might have completed before we get here, so check that by * using some flags! */ while (urb->transfer_flags & URB_WAIT_WAKEUP) { urb->transfer_flags |= URB_IS_SLEEPING; cv_wait(&urb->cv_wait, &Giant); urb->transfer_flags &= ~URB_IS_SLEEPING; } err = urb->status; done: if (do_unlock) mtx_unlock(&Giant); if (p_actlen != NULL) { if (err) *p_actlen = 0; else *p_actlen = urb->actual_length; } return (err); } /*------------------------------------------------------------------------* * usb_control_msg * * The following function performs a control transfer sequence on any * control, bulk or interrupt endpoint, specified by "uhe". A control * transfer means that you transfer an 8-byte header first followed by * a data-phase as indicated by the 8-byte header. The "timeout" is * given in milliseconds.
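 *
 * A hypothetical synchronous vendor-read through this wrapper (note
 * the FreeBSD-specific "uhe" argument; "udev" and the 0x01 request
 * code are made up for the example):
 *
 *	uint8_t buf[4];
 *	int len;
 *
 *	len = usb_control_msg(udev, &udev->ep0, 0x01,
 *	    UT_READ_VENDOR_DEVICE, 0, 0, buf, sizeof(buf), 1000);
 *	if (len < 0)
 *		...			(transfer failed)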
* * Return values: * 0: Success * < 0: Failure * > 0: Actual length *------------------------------------------------------------------------*/ int usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *uhe, uint8_t request, uint8_t requesttype, uint16_t value, uint16_t index, void *data, uint16_t size, usb_timeout_t timeout) { struct usb_device_request req; struct urb *urb; int err; uint16_t actlen; uint8_t type; uint8_t addr; req.bmRequestType = requesttype; req.bRequest = request; USETW(req.wValue, value); USETW(req.wIndex, index); USETW(req.wLength, size); if (uhe == NULL) { return (-EINVAL); } type = (uhe->desc.bmAttributes & UE_XFERTYPE); addr = (uhe->desc.bEndpointAddress & UE_ADDR); if (type != UE_CONTROL) { return (-EINVAL); } if (addr == 0) { /* * The FreeBSD USB stack supports standard control * transfers on control endpoint zero: */ err = usbd_do_request_flags(dev, NULL, &req, data, USB_SHORT_XFER_OK, &actlen, timeout); if (err) { err = -EPIPE; } else { err = actlen; } return (err); } if (dev->flags.usb_mode != USB_MODE_HOST) { /* not supported */ return (-EINVAL); } err = usb_setup_endpoint(dev, uhe, 1 /* dummy */ ); /* * NOTE: we need to allocate real memory here so that we don't * transfer data to/from the stack! * * 0xFFFF is a FreeBSD specific magic value. */ urb = usb_alloc_urb(0xFFFF, size); if (urb == NULL) return (-ENOMEM); urb->dev = dev; urb->endpoint = uhe; memcpy(urb->setup_packet, &req, sizeof(req)); if (size && (!(req.bmRequestType & UT_READ))) { /* move the data to a real buffer */ memcpy(USB_ADD_BYTES(urb->setup_packet, sizeof(req)), data, size); } err = usb_start_wait_urb(urb, timeout, &actlen); if (req.bmRequestType & UT_READ) { if (actlen) { bcopy(USB_ADD_BYTES(urb->setup_packet, sizeof(req)), data, actlen); } } usb_free_urb(urb); if (err == 0) { err = actlen; } return (err); } /*------------------------------------------------------------------------* * usb_set_interface * * The following function will select which alternate setting of an * USB interface you plan to use. By default alternate setting with * index zero is selected. Note that "iface_no" is not the interface * index, but rather the value of "bInterfaceNumber". *------------------------------------------------------------------------*/ int usb_set_interface(struct usb_device *dev, uint8_t iface_no, uint8_t alt_index) { struct usb_interface *p_ui = usb_ifnum_to_if(dev, iface_no); int err; if (p_ui == NULL) return (-EINVAL); if (alt_index >= p_ui->num_altsetting) return (-EINVAL); usb_linux_cleanup_interface(dev, p_ui); err = -usbd_set_alt_interface_index(dev, p_ui->bsd_iface_index, alt_index); if (err == 0) { p_ui->cur_altsetting = p_ui->altsetting + alt_index; } return (err); } /*------------------------------------------------------------------------* * usb_setup_endpoint * * The following function is an extension to the Linux USB API that * allows you to set a maximum buffer size for a given USB endpoint. * The maximum buffer size is per URB. If you don't call this function * to set a maximum buffer size, the endpoint will not be functional. * Note that for isochronous endpoints the maximum buffer size must be * a non-zero dummy, hence this function will base the maximum buffer * size on "wMaxPacketSize". 
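 *
 * A driver typically calls this once per endpoint before submitting
 * the first URB, e.g. (sketch; "udev" and "uhe" are assumed to have
 * been looked up already):
 *
 *	if (usb_setup_endpoint(udev, uhe, 4096) != 0)
 *		...			(endpoint cannot be used)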
/*------------------------------------------------------------------------*
 *	usb_setup_endpoint
 *
 * The following function is an extension to the Linux USB API that
 * allows you to set a maximum buffer size for a given USB endpoint.
 * The maximum buffer size is per URB. If you don't call this function
 * to set a maximum buffer size, the endpoint will not be functional.
 * Note that for isochronous endpoints the maximum buffer size must be
 * a non-zero dummy, hence this function will base the maximum buffer
 * size on "wMaxPacketSize".
 *------------------------------------------------------------------------*/
int
usb_setup_endpoint(struct usb_device *dev,
    struct usb_host_endpoint *uhe, usb_size_t bufsize)
{
	struct usb_config cfg[2];
	uint8_t type = uhe->desc.bmAttributes & UE_XFERTYPE;
	uint8_t addr = uhe->desc.bEndpointAddress;

	if (uhe->fbsd_buf_size == bufsize) {
		/* optimize */
		return (0);
	}
	usbd_transfer_unsetup(uhe->bsd_xfer, 2);

	uhe->fbsd_buf_size = bufsize;

	if (bufsize == 0) {
		return (0);
	}
	memset(cfg, 0, sizeof(cfg));

	if (type == UE_ISOCHRONOUS) {
		/*
		 * Isochronous transfers are special in that they don't fit
		 * into the BULK/INTR/CONTROL transfer model.
		 */
		cfg[0].type = type;
		cfg[0].endpoint = addr & UE_ADDR;
		cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN);
		cfg[0].callback = &usb_linux_isoc_callback;
		cfg[0].bufsize = 0;	/* use wMaxPacketSize */
		cfg[0].frames = usb_max_isoc_frames(dev);
		cfg[0].flags.proxy_buffer = 1;
#if 0
		/*
		 * The Linux USB API allows non back-to-back
		 * isochronous frames which we do not support. If the
		 * isochronous frames are not back-to-back we need to
		 * do a copy, and then we need a buffer for
		 * that. Enable this at your own risk.
		 */
		cfg[0].flags.ext_buffer = 1;
#endif
		cfg[0].flags.short_xfer_ok = 1;

		bcopy(cfg, cfg + 1, sizeof(*cfg));

		/* Allocate and setup two generic FreeBSD USB transfers */
		if (usbd_transfer_setup(dev, &uhe->bsd_iface_index,
		    uhe->bsd_xfer, cfg, 2, uhe, &Giant)) {
			return (-EINVAL);
		}
	} else {
		if (bufsize > (1 << 22)) {
			/* limit buffer size */
			bufsize = (1 << 22);
		}
		/* Allocate and setup one generic FreeBSD USB transfer */
		cfg[0].type = type;
		cfg[0].endpoint = addr & UE_ADDR;
		cfg[0].direction = addr & (UE_DIR_OUT | UE_DIR_IN);
		cfg[0].callback = &usb_linux_non_isoc_callback;
		cfg[0].bufsize = bufsize;
		cfg[0].flags.ext_buffer = 1;	/* enable zero-copy */
		cfg[0].flags.proxy_buffer = 1;
		cfg[0].flags.short_xfer_ok = 1;

		if (usbd_transfer_setup(dev, &uhe->bsd_iface_index,
		    uhe->bsd_xfer, cfg, 1, uhe, &Giant)) {
			return (-EINVAL);
		}
	}
	return (0);
}
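Because an endpoint is not functional until a buffer size has been set, a driver on this layer is expected to call the extension above once per endpoint before queueing URBs. A minimal sketch, with a hypothetical 4096-byte cap; passing zero instead releases the underlying FreeBSD transfers again, which is how usb_linux_free_device() below tears endpoints down:

/* Hypothetical setup step, for illustration only. */
static int
my_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *uhe)
{
	/* allocate FreeBSD transfers sized for 4096-byte URBs */
	return (usb_setup_endpoint(dev, uhe, 4096));
}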
/*------------------------------------------------------------------------*
 *	usb_linux_create_usb_device
 *
 * The following function is used to build up a per USB device
 * structure tree, that mimics the Linux one. The root structure
 * is returned by this function.
 *------------------------------------------------------------------------*/
static int
usb_linux_create_usb_device(struct usb_device *udev, device_t dev)
{
	struct usb_config_descriptor *cd = usbd_get_config_descriptor(udev);
	struct usb_descriptor *desc;
	struct usb_interface_descriptor *id;
	struct usb_endpoint_descriptor *ed;
	struct usb_interface *p_ui = NULL;
	struct usb_host_interface *p_uhi = NULL;
	struct usb_host_endpoint *p_uhe = NULL;
	usb_size_t size;
	uint16_t niface_total;
	uint16_t nedesc;
	uint16_t iface_no_curr;
	uint16_t iface_index;
	uint8_t pass;
	uint8_t iface_no;

	/*
	 * We do two passes: one pass for computing the necessary memory
	 * size and one pass to initialize all the allocated memory
	 * structures.
	 */
	for (pass = 0; pass < 2; pass++) {
		iface_no_curr = 0xFFFF;
		niface_total = 0;
		iface_index = 0;
		nedesc = 0;
		desc = NULL;

		/*
		 * Iterate over all the USB descriptors. Use the USB config
		 * descriptor pointer provided by the FreeBSD USB stack.
		 */
		while ((desc = usb_desc_foreach(cd, desc))) {
			/*
			 * Build up a tree according to the descriptors we
			 * find:
			 */
			switch (desc->bDescriptorType) {
			case UDESC_DEVICE:
				break;

			case UDESC_ENDPOINT:
				ed = (void *)desc;
				if ((ed->bLength < sizeof(*ed)) ||
				    (iface_index == 0))
					break;
				if (p_uhe) {
					bcopy(ed, &p_uhe->desc,
					    sizeof(p_uhe->desc));
					p_uhe->bsd_iface_index =
					    iface_index - 1;
					TAILQ_INIT(&p_uhe->bsd_urb_list);
					p_uhe++;
				}
				if (p_uhi) {
					(p_uhi - 1)->desc.bNumEndpoints++;
				}
				nedesc++;
				break;

			case UDESC_INTERFACE:
				id = (void *)desc;
				if (id->bLength < sizeof(*id))
					break;
				if (p_uhi) {
					bcopy(id, &p_uhi->desc,
					    sizeof(p_uhi->desc));
					p_uhi->desc.bNumEndpoints = 0;
					p_uhi->endpoint = p_uhe;
					p_uhi->string = "";
					p_uhi->bsd_iface_index = iface_index;
					p_uhi++;
				}
				iface_no = id->bInterfaceNumber;
				niface_total++;
				if (iface_no_curr != iface_no) {
					if (p_ui) {
						p_ui->altsetting = p_uhi - 1;
						p_ui->cur_altsetting =
						    p_uhi - 1;
						p_ui->num_altsetting = 1;
						p_ui->bsd_iface_index =
						    iface_index;
						p_ui->linux_udev = udev;
						p_ui++;
					}
					iface_no_curr = iface_no;
					iface_index++;
				} else {
					if (p_ui) {
						(p_ui - 1)->num_altsetting++;
					}
				}
				break;

			default:
				break;
			}
		}

		if (pass == 0) {
			size = (sizeof(*p_uhe) * nedesc) +
			    (sizeof(*p_ui) * iface_index) +
			    (sizeof(*p_uhi) * niface_total);

			p_uhe = malloc(size, M_USBDEV, M_WAITOK | M_ZERO);
			p_ui = (void *)(p_uhe + nedesc);
			p_uhi = (void *)(p_ui + iface_index);

			udev->linux_iface_start = p_ui;
			udev->linux_iface_end = p_ui + iface_index;
			udev->linux_endpoint_start = p_uhe;
			udev->linux_endpoint_end = p_uhe + nedesc;
			udev->devnum = device_get_unit(dev);
			bcopy(&udev->ddesc, &udev->descriptor,
			    sizeof(udev->descriptor));
			bcopy(udev->ctrl_ep.edesc, &udev->ep0.desc,
			    sizeof(udev->ep0.desc));
		}
	}
	return (0);
}

/*------------------------------------------------------------------------*
 *	usb_alloc_urb
 *
 * This function should always be used when you allocate an URB for
 * use with the USB Linux stack. In case of an isochronous transfer
 * you must specify the maximum number of "iso_packets" which you
 * plan to transfer per URB. This function is always blocking, and
 * the Linux "mem_flags" semantics are not honored.
 *------------------------------------------------------------------------*/
struct urb *
usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags)
{
	struct urb *urb;
	usb_size_t size;

	if (iso_packets == 0xFFFF) {
		/*
		 * FreeBSD specific magic value to ask for control transfer
		 * memory allocation:
		 */
		size = sizeof(*urb) +
		    sizeof(struct usb_device_request) + mem_flags;
	} else {
		size = sizeof(*urb) +
		    (iso_packets * sizeof(urb->iso_frame_desc[0]));
	}

	urb = malloc(size, M_USBDEV, M_WAITOK | M_ZERO);
	if (urb) {
		cv_init(&urb->cv_wait, "URBWAIT");
		if (iso_packets == 0xFFFF) {
			urb->setup_packet = (void *)(urb + 1);
			urb->transfer_buffer = (void *)(urb->setup_packet +
			    sizeof(struct usb_device_request));
		} else {
			urb->number_of_packets = iso_packets;
		}
	}
	return (urb);
}
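As a usage illustration (the packet count of 8 is an arbitrary example value, not a requirement of this file), an isochronous URB would be allocated like this:

/* Hypothetical allocation, for illustration only. */
static struct urb *
my_alloc_isoc_urb(void)
{
	/*
	 * Room for 8 "iso_frame_desc" entries is reserved behind the
	 * URB and "number_of_packets" is preset to 8 by the allocator.
	 */
	return (usb_alloc_urb(8, 0));
}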
/*------------------------------------------------------------------------*
 *	usb_find_host_endpoint
 *
 * The following function will return the Linux USB host endpoint
 * structure that matches the given endpoint type and endpoint
 * value. If no match is found, NULL is returned. This function is not
 * part of the Linux USB API and is only used internally.
 *------------------------------------------------------------------------*/
struct usb_host_endpoint *
usb_find_host_endpoint(struct usb_device *dev, uint8_t type, uint8_t ep)
{
	struct usb_host_endpoint *uhe;
	struct usb_host_endpoint *uhe_end;
	struct usb_host_interface *uhi;
	struct usb_interface *ui;
	uint8_t ea;
	uint8_t at;
	uint8_t mask;

	if (dev == NULL) {
		return (NULL);
	}
	if (type == UE_CONTROL) {
		mask = UE_ADDR;
	} else {
		mask = (UE_DIR_IN | UE_DIR_OUT | UE_ADDR);
	}
	ep &= mask;

	/*
	 * Iterate over all the interfaces searching the selected alternate
	 * setting only, and all belonging endpoints.
	 */
	for (ui = dev->linux_iface_start;
	    ui != dev->linux_iface_end;
	    ui++) {
		uhi = ui->cur_altsetting;
		if (uhi) {
			uhe_end = uhi->endpoint + uhi->desc.bNumEndpoints;
			for (uhe = uhi->endpoint;
			    uhe != uhe_end;
			    uhe++) {
				ea = uhe->desc.bEndpointAddress;
				at = uhe->desc.bmAttributes;

				if (((ea & mask) == ep) &&
				    ((at & UE_XFERTYPE) == type)) {
					return (uhe);
				}
			}
		}
	}
	if ((type == UE_CONTROL) && ((ep & UE_ADDR) == 0)) {
		return (&dev->ep0);
	}
	return (NULL);
}

/*------------------------------------------------------------------------*
 *	usb_altnum_to_altsetting
 *
 * The following function returns a pointer to an alternate setting by
 * index given a "usb_interface" pointer. If the alternate setting by
 * index does not exist, NULL is returned. An alternate setting is a
 * variant of an interface, but usually with slightly different
 * characteristics.
 *------------------------------------------------------------------------*/
struct usb_host_interface *
usb_altnum_to_altsetting(const struct usb_interface *intf, uint8_t alt_index)
{
	if (alt_index >= intf->num_altsetting) {
		return (NULL);
	}
	return (intf->altsetting + alt_index);
}

/*------------------------------------------------------------------------*
 *	usb_ifnum_to_if
 *
 * The following function looks up a USB interface by
 * "bInterfaceNumber". If no match is found, NULL is returned.
 *------------------------------------------------------------------------*/
struct usb_interface *
usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no)
{
	struct usb_interface *p_ui;

	for (p_ui = dev->linux_iface_start;
	    p_ui != dev->linux_iface_end;
	    p_ui++) {
		if ((p_ui->num_altsetting > 0) &&
		    (p_ui->altsetting->desc.bInterfaceNumber == iface_no)) {
			return (p_ui);
		}
	}
	return (NULL);
}

/*------------------------------------------------------------------------*
 *	usb_buffer_alloc
 *------------------------------------------------------------------------*/
void *
usb_buffer_alloc(struct usb_device *dev, usb_size_t size,
    uint16_t mem_flags, uint8_t *dma_addr)
{
	return (malloc(size, M_USBDEV, M_WAITOK | M_ZERO));
}

/*------------------------------------------------------------------------*
 *	usbd_get_intfdata
 *------------------------------------------------------------------------*/
void *
usbd_get_intfdata(struct usb_interface *intf)
{
	return (intf->bsd_priv_sc);
}
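A hedged sketch of how the two lookup helpers above compose; interface number 0 and alternate index 1 are arbitrary example values:

/* Hypothetical lookup, for illustration only. */
static struct usb_host_interface *
my_find_alt_setting(struct usb_device *dev)
{
	struct usb_interface *intf;

	intf = usb_ifnum_to_if(dev, 0);		/* by bInterfaceNumber */
	if (intf == NULL)
		return (NULL);
	return (usb_altnum_to_altsetting(intf, 1));	/* by index */
}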
/*------------------------------------------------------------------------*
 *	usb_linux_register
 *
 * The following function is used by the "USB_DRIVER_EXPORT()" macro,
 * and is used to register a Linux USB driver, so that its
 * "usb_device_id" structures get searched at probe time. This
 * function is not part of the Linux USB API, and is for internal use
 * only.
 *------------------------------------------------------------------------*/
void
usb_linux_register(void *arg)
{
	struct usb_driver *drv = arg;

	mtx_lock(&Giant);
	LIST_INSERT_HEAD(&usb_linux_driver_list, drv, linux_driver_list);
	mtx_unlock(&Giant);

	usb_needs_explore_all();
}

/*------------------------------------------------------------------------*
 *	usb_linux_deregister
 *
 * The following function is used by the "USB_DRIVER_EXPORT()" macro,
 * and is used to deregister a Linux USB driver. This function will
 * ensure that all driver instances belonging to the Linux USB device
 * driver in question get detached before the driver is
 * unloaded. This function is not part of the Linux USB API, and is
 * for internal use only.
 *------------------------------------------------------------------------*/
void
usb_linux_deregister(void *arg)
{
	struct usb_driver *drv = arg;
	struct usb_linux_softc *sc;

repeat:
	mtx_lock(&Giant);
	LIST_FOREACH(sc, &usb_linux_attached_list, sc_attached_list) {
		if (sc->sc_udrv == drv) {
			mtx_unlock(&Giant);
			device_detach(sc->sc_fbsd_dev);
			goto repeat;
		}
	}
	LIST_REMOVE(drv, linux_driver_list);
	mtx_unlock(&Giant);
}

/*------------------------------------------------------------------------*
 *	usb_linux_free_device
 *
 * The following function is only used by the FreeBSD USB stack, to
 * clean up and free memory after a Linux USB device was attached.
 *------------------------------------------------------------------------*/
void
usb_linux_free_device(struct usb_device *dev)
{
	struct usb_host_endpoint *uhe;
	struct usb_host_endpoint *uhe_end;
	int err;

	uhe = dev->linux_endpoint_start;
	uhe_end = dev->linux_endpoint_end;
	while (uhe != uhe_end) {
		err = usb_setup_endpoint(dev, uhe, 0);
		uhe++;
	}
	err = usb_setup_endpoint(dev, &dev->ep0, 0);
	free(dev->linux_endpoint_start, M_USBDEV);
}

/*------------------------------------------------------------------------*
 *	usb_buffer_free
 *------------------------------------------------------------------------*/
void
usb_buffer_free(struct usb_device *dev, usb_size_t size,
    void *addr, uint8_t dma_addr)
{
	free(addr, M_USBDEV);
}

/*------------------------------------------------------------------------*
 *	usb_free_urb
 *------------------------------------------------------------------------*/
void
usb_free_urb(struct urb *urb)
{
	if (urb == NULL) {
		return;
	}
	/* make sure that the current URB is not active */
	usb_kill_urb(urb);

	/* destroy condition variable */
	cv_destroy(&urb->cv_wait);

	/* just free it */
	free(urb, M_USBDEV);
}

/*------------------------------------------------------------------------*
 *	usb_init_urb
 *
 * The following function can be used to initialize a custom URB. It
 * is not recommended to use this function. Use "usb_alloc_urb()"
 * instead.
 *------------------------------------------------------------------------*/
void
usb_init_urb(struct urb *urb)
{
	if (urb == NULL) {
		return;
	}
	memset(urb, 0, sizeof(*urb));
}

/*------------------------------------------------------------------------*
 *	usb_kill_urb
 *------------------------------------------------------------------------*/
void
usb_kill_urb(struct urb *urb)
{
	usb_unlink_urb_sub(urb, 1);
}
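For context, the usual teardown order in a driver would look like the hedged sketch below; note that usb_free_urb() already calls usb_kill_urb() internally, so the explicit kill only matters when the URB is to be reused:

/* Hypothetical teardown, for illustration only. */
static void
my_stop(struct urb *urb)
{
	usb_kill_urb(urb);	/* synchronously cancel, if pending */
	usb_free_urb(urb);	/* kills again internally, then frees */
}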
/*------------------------------------------------------------------------*
 *	usb_set_intfdata
 *
 * The following function sets the per Linux USB interface private
 * data pointer. It is used by most Linux USB device drivers.
 *------------------------------------------------------------------------*/
void
usb_set_intfdata(struct usb_interface *intf, void *data)
{
	intf->bsd_priv_sc = data;
}

/*------------------------------------------------------------------------*
 *	usb_linux_cleanup_interface
 *
 * The following function will release all FreeBSD USB transfers
 * associated with a Linux USB interface. It is for internal use only.
 *------------------------------------------------------------------------*/
static void
usb_linux_cleanup_interface(struct usb_device *dev,
    struct usb_interface *iface)
{
	struct usb_host_interface *uhi;
	struct usb_host_interface *uhi_end;
	struct usb_host_endpoint *uhe;
	struct usb_host_endpoint *uhe_end;
	int err;

	uhi = iface->altsetting;
	uhi_end = iface->altsetting + iface->num_altsetting;
	while (uhi != uhi_end) {
		uhe = uhi->endpoint;
		uhe_end = uhi->endpoint + uhi->desc.bNumEndpoints;
		while (uhe != uhe_end) {
			err = usb_setup_endpoint(dev, uhe, 0);
			uhe++;
		}
		uhi++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_linux_wait_complete
 *
 * The following function is used by "usb_start_wait_urb()" to wake it
 * up, when a USB transfer has finished.
 *------------------------------------------------------------------------*/
static void
usb_linux_wait_complete(struct urb *urb)
{
	if (urb->transfer_flags & URB_IS_SLEEPING) {
		cv_signal(&urb->cv_wait);
	}
	urb->transfer_flags &= ~URB_WAIT_WAKEUP;
}

/*------------------------------------------------------------------------*
 *	usb_linux_complete
 *------------------------------------------------------------------------*/
static void
usb_linux_complete(struct usb_xfer *xfer)
{
	struct urb *urb;

	urb = usbd_xfer_get_priv(xfer);
	usbd_xfer_set_priv(xfer, NULL);
	if (urb->complete) {
		(urb->complete) (urb);
	}
}
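usb_set_intfdata() pairs with usbd_get_intfdata() defined earlier; a minimal sketch of the attach-time pattern, assuming a hypothetical per-device softc type:

/* Hypothetical attach fragment, for illustration only. */
static int
my_attach(struct usb_interface *intf)
{
	struct my_softc *sc;

	sc = malloc(sizeof(*sc), M_USBDEV, M_WAITOK | M_ZERO);
	usb_set_intfdata(intf, sc);	/* retrieved via usbd_get_intfdata() */
	return (0);
}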
/*------------------------------------------------------------------------*
 *	usb_linux_isoc_callback
 *
 * The following is the FreeBSD isochronous USB callback. Isochronous
 * frames are USB packets transferred 1000 or 8000 times per second,
 * depending on whether a full- or high-speed USB transfer is used.
 *------------------------------------------------------------------------*/
static void
usb_linux_isoc_callback(struct usb_xfer *xfer, usb_error_t error)
{
	usb_frlength_t max_frame = xfer->max_frame_size;
	usb_frlength_t offset;
	usb_frcount_t x;
	struct urb *urb = usbd_xfer_get_priv(xfer);
	struct usb_host_endpoint *uhe = usbd_xfer_softc(xfer);
	struct usb_iso_packet_descriptor *uipd;

	DPRINTF("\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		if (urb->bsd_isread) {
			/* copy in data with regard to the URB */
			offset = 0;
			for (x = 0; x < urb->number_of_packets; x++) {
				uipd = urb->iso_frame_desc + x;
				if (uipd->length > xfer->frlengths[x]) {
					if (urb->transfer_flags &
					    URB_SHORT_NOT_OK) {
						/* XXX should be EREMOTEIO */
						uipd->status = -EPIPE;
					} else {
						uipd->status = 0;
					}
				} else {
					uipd->status = 0;
				}
				uipd->actual_length = xfer->frlengths[x];
				if (!xfer->flags.ext_buffer) {
					usbd_copy_out(xfer->frbuffers, offset,
					    USB_ADD_BYTES(urb->transfer_buffer,
					    uipd->offset),
					    uipd->actual_length);
				}
				offset += max_frame;
			}
		} else {
			for (x = 0; x < urb->number_of_packets; x++) {
				uipd = urb->iso_frame_desc + x;
				uipd->actual_length = xfer->frlengths[x];
				uipd->status = 0;
			}
		}
		urb->actual_length = xfer->actlen;

		/* check for short transfer */
		if (xfer->actlen < xfer->sumlen) {
			/* short transfer */
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				/* XXX should be EREMOTEIO */
				urb->status = -EPIPE;
			} else {
				urb->status = 0;
			}
		} else {
			/* success */
			urb->status = 0;
		}

		/* call callback */
		usb_linux_complete(xfer);

		/* FALLTHROUGH: start the next queued URB, if any */

	case USB_ST_SETUP:
tr_setup:

		if (xfer->priv_fifo == NULL) {
			/* get next transfer */
			urb = TAILQ_FIRST(&uhe->bsd_urb_list);
			if (urb == NULL) {
				/* nothing to do */
				return;
			}
			TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list);
			urb->bsd_urb_list.tqe_prev = NULL;

			x = xfer->max_frame_count;
			if (urb->number_of_packets > x) {
				/* XXX simply truncate the transfer */
				urb->number_of_packets = x;
			}
		} else {
			DPRINTF("Already got a transfer\n");

			/* already got a transfer (should not happen) */
			urb = usbd_xfer_get_priv(xfer);
		}
		urb->bsd_isread = (uhe->desc.bEndpointAddress &
		    UE_DIR_IN) ? 1 : 0;

		if (xfer->flags.ext_buffer) {
			/* set virtual address to load */
			usbd_xfer_set_frame_data(xfer, 0,
			    urb->transfer_buffer, 0);
		}
		if (!(urb->bsd_isread)) {
			/* copy out data with regard to the URB */
			offset = 0;
			for (x = 0; x < urb->number_of_packets; x++) {
				uipd = urb->iso_frame_desc + x;
				usbd_xfer_set_frame_len(xfer, x,
				    uipd->length);
				if (!xfer->flags.ext_buffer) {
					usbd_copy_in(xfer->frbuffers, offset,
					    USB_ADD_BYTES(urb->transfer_buffer,
					    uipd->offset), uipd->length);
				}
				offset += uipd->length;
			}
		} else {
			/*
			 * compute the transfer length into the "offset"
			 * variable
			 */
			offset = urb->number_of_packets * max_frame;

			/* setup "frlengths" array */
			for (x = 0; x < urb->number_of_packets; x++) {
				uipd = urb->iso_frame_desc + x;
				usbd_xfer_set_frame_len(xfer, x, max_frame);
			}
		}
		usbd_xfer_set_priv(xfer, urb);
		xfer->flags.force_short_xfer = 0;
		xfer->timeout = urb->timeout;
		xfer->nframes = urb->number_of_packets;
		usbd_transfer_submit(xfer);
		return;

	default:			/* Error */
		if (xfer->error == USB_ERR_CANCELLED) {
			urb->status = -ECONNRESET;
		} else {
			urb->status = -EPIPE;	/* stalled */
		}

		/* Set zero for "actual_length" */
		urb->actual_length = 0;

		/* Set zero length and the error status for all packets */
		for (x = 0; x < urb->number_of_packets; x++) {
			urb->iso_frame_desc[x].actual_length = 0;
			urb->iso_frame_desc[x].status = urb->status;
		}

		/* call callback */
		usb_linux_complete(xfer);

		if (xfer->error == USB_ERR_CANCELLED) {
			/* we need to return in this case */
			return;
		}
		goto tr_setup;
	}
}

/*------------------------------------------------------------------------*
 *	usb_linux_non_isoc_callback
 *
 * The following is the FreeBSD BULK/INTERRUPT and CONTROL USB
 * callback. It dequeues Linux USB stack compatible URB's, transforms
 * the URB fields into a FreeBSD USB transfer, and defragments the USB
 * transfer as required. When the transfer is complete the "complete"
 * callback is called.
 *------------------------------------------------------------------------*/
static void
usb_linux_non_isoc_callback(struct usb_xfer *xfer, usb_error_t error)
{
	enum {
		REQ_SIZE = sizeof(struct usb_device_request)
	};
	struct urb *urb = usbd_xfer_get_priv(xfer);
	struct usb_host_endpoint *uhe = usbd_xfer_softc(xfer);
	uint8_t *ptr;
	usb_frlength_t max_bulk = usbd_xfer_max_len(xfer);
	uint8_t data_frame = xfer->flags_int.control_xfr ? 1 : 0;
	DPRINTF("\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		if (xfer->flags_int.control_xfr) {
			/* don't transfer the setup packet again: */
			usbd_xfer_set_frame_len(xfer, 0, 0);
		}
		if (urb->bsd_isread && (!xfer->flags.ext_buffer)) {
			/* copy in data with regard to the URB */
			usbd_copy_out(xfer->frbuffers + data_frame, 0,
			    urb->bsd_data_ptr, xfer->frlengths[data_frame]);
		}
		urb->bsd_length_rem -= xfer->frlengths[data_frame];
		urb->bsd_data_ptr += xfer->frlengths[data_frame];
		urb->actual_length += xfer->frlengths[data_frame];

		/* check for short transfer */
		if (xfer->actlen < xfer->sumlen) {
			urb->bsd_length_rem = 0;

			/* short transfer */
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				urb->status = -EPIPE;
			} else {
				urb->status = 0;
			}
		} else {
			/* check remainder */
			if (urb->bsd_length_rem > 0) {
				goto setup_bulk;
			}
			/* success */
			urb->status = 0;
		}
		/* call callback */
		usb_linux_complete(xfer);

		/* FALLTHROUGH: start the next queued URB, if any */

	case USB_ST_SETUP:
tr_setup:
		/* get next transfer */
		urb = TAILQ_FIRST(&uhe->bsd_urb_list);
		if (urb == NULL) {
			/* nothing to do */
			return;
		}
		TAILQ_REMOVE(&uhe->bsd_urb_list, urb, bsd_urb_list);
		urb->bsd_urb_list.tqe_prev = NULL;

		usbd_xfer_set_priv(xfer, urb);
		xfer->flags.force_short_xfer = 0;
		xfer->timeout = urb->timeout;

		if (xfer->flags_int.control_xfr) {
			/*
-			 * USB control transfers need special handling.
-			 * First copy in the header, then copy in data!
-			 */
+			 * USB control transfers need special handling.
+			 * First copy in the header, then copy in data!
+			 */
			if (!xfer->flags.ext_buffer) {
				usbd_copy_in(xfer->frbuffers, 0,
				    urb->setup_packet, REQ_SIZE);
				usbd_xfer_set_frame_len(xfer, 0, REQ_SIZE);
			} else {
				/* set virtual address to load */
				usbd_xfer_set_frame_data(xfer, 0,
				    urb->setup_packet, REQ_SIZE);
			}

			ptr = urb->setup_packet;

			/* setup data transfer direction and length */
			urb->bsd_isread = (ptr[0] & UT_READ) ? 1 : 0;
			urb->bsd_length_rem = ptr[6] | (ptr[7] << 8);
		} else {
			/* setup data transfer direction */
			urb->bsd_length_rem = urb->transfer_buffer_length;
			urb->bsd_isread = (uhe->desc.bEndpointAddress &
			    UE_DIR_IN) ? 1 : 0;
		}
		urb->bsd_data_ptr = urb->transfer_buffer;
		urb->actual_length = 0;

setup_bulk:
		if (max_bulk > urb->bsd_length_rem) {
			max_bulk = urb->bsd_length_rem;
		}
		/* check if we need to force a short transfer */
		if ((max_bulk == urb->bsd_length_rem) &&
		    (urb->transfer_flags & URB_ZERO_PACKET) &&
		    (!xfer->flags_int.control_xfr)) {
			xfer->flags.force_short_xfer = 1;
		}
		/* check if we need to copy in data */
		if (xfer->flags.ext_buffer) {
			/* set virtual address to load */
			usbd_xfer_set_frame_data(xfer, data_frame,
			    urb->bsd_data_ptr, max_bulk);
		} else if (!urb->bsd_isread) {
			/* copy out data with regard to the URB */
			usbd_copy_in(xfer->frbuffers + data_frame, 0,
			    urb->bsd_data_ptr, max_bulk);
			usbd_xfer_set_frame_len(xfer, data_frame, max_bulk);
		}
		if (xfer->flags_int.control_xfr) {
			if (max_bulk > 0) {
				xfer->nframes = 2;
			} else {
				xfer->nframes = 1;
			}
		} else {
			xfer->nframes = 1;
		}
		usbd_transfer_submit(xfer);
		return;

	default:
		if (xfer->error == USB_ERR_CANCELLED) {
			urb->status = -ECONNRESET;
		} else {
			urb->status = -EPIPE;
		}

		/* Set zero for "actual_length" */
		urb->actual_length = 0;

		/* call callback */
		usb_linux_complete(xfer);

		if (xfer->error == USB_ERR_CANCELLED) {
			/* we need to return in this case */
			return;
		}
		goto tr_setup;
	}
}

/*------------------------------------------------------------------------*
 *	usb_fill_bulk_urb
 *------------------------------------------------------------------------*/
void
usb_fill_bulk_urb(struct urb *urb, struct usb_device *udev,
    struct usb_host_endpoint *uhe, void *buf,
    int length, usb_complete_t callback, void *arg)
{
	urb->dev = udev;
	urb->endpoint = uhe;
	urb->transfer_buffer = buf;
	urb->transfer_buffer_length = length;
	urb->complete = callback;
	urb->context = arg;
}

/*------------------------------------------------------------------------*
 *	usb_bulk_msg
 *
 * NOTE: This function can also be used for interrupt endpoints!
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
int
usb_bulk_msg(struct usb_device *udev, struct usb_host_endpoint *uhe,
    void *data, int len, uint16_t *pactlen, usb_timeout_t timeout)
{
	struct urb *urb;
	int err;

	if (uhe == NULL)
		return (-EINVAL);
	if (len < 0)
		return (-EINVAL);

	err = usb_setup_endpoint(udev, uhe, 4096 /* bytes */);
	if (err)
		return (err);

	urb = usb_alloc_urb(0, 0);
	if (urb == NULL)
		return (-ENOMEM);

-	usb_fill_bulk_urb(urb, udev, uhe, data, len, 
+	usb_fill_bulk_urb(urb, udev, uhe, data, len,
	    usb_linux_wait_complete, NULL);

	err = usb_start_wait_urb(urb, timeout, pactlen);
	usb_free_urb(urb);

	return (err);
}
MODULE_DEPEND(linuxkpi, usb, 1, 1, 1);

static void
usb_linux_init(void *arg)
{
	/* register our function */
	usb_linux_free_device_p = &usb_linux_free_device;
}
SYSINIT(usb_linux_init, SI_SUB_LOCK, SI_ORDER_FIRST, usb_linux_init, NULL);
SYSUNINIT(usb_linux_unload, SI_SUB_LOCK, SI_ORDER_ANY, usb_linux_unload, NULL);
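Finally, a hedged sketch of a synchronous bulk read using usb_bulk_msg() above; the helper name and the 250 ms timeout are arbitrary examples, not part of this file:

/* Hypothetical synchronous read, for illustration only. */
static int
my_bulk_read(struct usb_device *dev, struct usb_host_endpoint *uhe,
    void *buf, int len)
{
	uint16_t actlen;
	int err;

	err = usb_bulk_msg(dev, uhe, buf, len, &actlen, 250 /* ms */);
	if (err)
		return (err);		/* negative errno */
	return (actlen);		/* number of bytes transferred */
}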