Index: sys/compat/linuxkpi/common/include/asm/atomic.h =================================================================== --- sys/compat/linuxkpi/common/include/asm/atomic.h +++ sys/compat/linuxkpi/common/include/asm/atomic.h @@ -159,36 +159,41 @@ return (ret); } -#define cmpxchg(ptr, old, new) ({ \ - __typeof(*(ptr)) __ret; \ - \ - CTASSERT(sizeof(__ret) == 1 || sizeof(__ret) == 2 || \ - sizeof(__ret) == 4 || sizeof(__ret) == 8); \ - \ - __ret = (old); \ - switch (sizeof(__ret)) { \ - case 1: \ - while (!atomic_fcmpset_8((volatile int8_t *)(ptr), \ - (int8_t *)&__ret, (new)) && __ret == (old)) \ - ; \ - break; \ - case 2: \ - while (!atomic_fcmpset_16((volatile int16_t *)(ptr), \ - (int16_t *)&__ret, (new)) && __ret == (old)) \ - ; \ - break; \ - case 4: \ - while (!atomic_fcmpset_32((volatile int32_t *)(ptr), \ - (int32_t *)&__ret, (new)) && __ret == (old)) \ - ; \ - break; \ - case 8: \ - while (!atomic_fcmpset_64((volatile int64_t *)(ptr), \ - (int64_t *)&__ret, (new)) && __ret == (old)) \ - ; \ - break; \ - } \ - __ret; \ +#define cmpxchg(ptr, old, new) ({ \ + union { \ + __typeof(*(ptr)) val; \ + u8 u8[0]; \ + u16 u16[0]; \ + u32 u32[0]; \ + u64 u64[0]; \ + } __ret = { .val = (old) }, __new = { .val = (new) }; \ + \ + CTASSERT(sizeof(__ret.val) == 1 || sizeof(__ret.val) == 2 || \ + sizeof(__ret.val) == 4 || sizeof(__ret.val) == 8); \ + \ + switch (sizeof(__ret.val)) { \ + case 1: \ + while (!atomic_fcmpset_8((volatile u8 *)(ptr), \ + __ret.u8, __new.u8[0]) && __ret.val == (old)) \ + ; \ + break; \ + case 2: \ + while (!atomic_fcmpset_16((volatile u16 *)(ptr), \ + __ret.u16, __new.u16[0]) && __ret.val == (old)) \ + ; \ + break; \ + case 4: \ + while (!atomic_fcmpset_32((volatile u32 *)(ptr), \ + __ret.u32, __new.u32[0]) && __ret.val == (old)) \ + ; \ + break; \ + case 8: \ + while (!atomic_fcmpset_64((volatile u64 *)(ptr), \ + __ret.u64, __new.u64[0]) && __ret.val == (old)) \ + ; \ + break; \ + } \ + __ret.val; \ }) #define cmpxchg_relaxed(...) 
cmpxchg(__VA_ARGS__) Index: sys/compat/linuxkpi/common/include/linux/compiler.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/compiler.h +++ sys/compat/linuxkpi/common/include/linux/compiler.h @@ -34,39 +34,41 @@ #include -#define __user -#define __kernel -#define __safe -#define __force -#define __nocast -#define __iomem -#define __chk_user_ptr(x) ((void)0) -#define __chk_io_ptr(x) ((void)0) -#define __builtin_warning(x, y...) (1) -#define __acquires(x) -#define __releases(x) -#define __acquire(x) do { } while (0) -#define __release(x) do { } while (0) -#define __cond_lock(x,c) (c) +#define __user +#define __kernel +#define __safe +#define __force +#define __nocast +#define __iomem +#define __chk_user_ptr(x) ((void)0) +#define __chk_io_ptr(x) ((void)0) +#define __builtin_warning(x, y...) (1) +#define __acquires(x) +#define __releases(x) +#define __acquire(x) do { } while (0) +#define __release(x) do { } while (0) +#define __cond_lock(x,c) (c) #define __bitwise -#define __devinitdata +#define __devinitdata #define __deprecated -#define __init +#define __init #define __devinit #define __devexit -#define __exit +#define __exit #define __rcu +#define __percpu +#define __weak __attribute__((weak)) #define __malloc #define ___stringify(...) #__VA_ARGS__ #define __stringify(...) 
___stringify(__VA_ARGS__) #define __attribute_const__ __attribute__((__const__)) -#undef __always_inline +#undef __always_inline #define __always_inline inline #define ____cacheline_aligned __aligned(CACHE_LINE_SIZE) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) -#define typeof(x) __typeof(x) +#define typeof(x) __typeof(x) #define uninitialized_var(x) x = x #define __always_unused __unused Index: sys/compat/linuxkpi/common/include/linux/dcache.h =================================================================== --- /dev/null +++ sys/compat/linuxkpi/common/include/linux/dcache.h @@ -0,0 +1,49 @@ +/*- + * Copyright (c) 2017 Limelight Networks, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ +#ifndef __LINUX_DCACHE_H +#define __LINUX_DCACHE_H + +struct inode; +struct dentry; +struct path; +struct pfs_node; + +struct dentry { + struct inode *d_inode; + struct dentry *d_parent; /* parent directory */ + /* FreeBSD */ + struct pfs_node *d_pfs_node; +}; + + +static inline struct inode *d_inode(const struct dentry *dentry) +{ + return (dentry->d_inode); +} + +#endif Index: sys/compat/linuxkpi/common/include/linux/device.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/device.h +++ sys/compat/linuxkpi/common/include/linux/device.h @@ -46,9 +46,6 @@ #include -enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED, IRQ_WAKE_THREAD, }; -typedef enum irqreturn irqreturn_t; - struct device; struct fwnode_handle; @@ -184,7 +181,7 @@ #define dev_notice(dev, fmt, ...) device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_dbg(dev, fmt, ...) do { } while (0) #define dev_printk(lvl, dev, fmt, ...) \ - device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) + device_printf((dev)->bsddev, fmt, ##__VA_ARGS__) #define dev_err_ratelimited(dev, ...) 
do { \ static linux_ratelimit_t __ratelimited; \ Index: sys/compat/linuxkpi/common/include/linux/fs.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/fs.h +++ sys/compat/linuxkpi/common/include/linux/fs.h @@ -42,6 +42,7 @@ #include #include #include +#include struct module; struct kiocb; @@ -65,11 +66,6 @@ typedef struct files_struct *fl_owner_t; -struct dentry { - struct inode *d_inode; - struct pfs_node *d_pfs_node; -}; - struct file_operations; struct linux_file_wait_queue { @@ -219,7 +215,7 @@ register_chrdev_region(dev_t dev, unsigned range, const char *name) { - return 0; + return (0); } static inline void @@ -234,14 +230,14 @@ const char *name) { - return 0; + return (0); } /* No current support for seek op in FreeBSD */ static inline int nonseekable_open(struct inode *inode, struct file *filp) { - return 0; + return (0); } extern unsigned int linux_iminor(struct inode *); @@ -288,6 +284,18 @@ return (file->_file->f_offset); } +static inline struct inode * +file_inode(const struct file *f) +{ + return (f->f_inode); +} + +static inline int +call_mmap(struct file *file, struct vm_area_struct *vma) +{ + return (file->f_op->mmap(file, vma)); +} + /* Shared memory support */ unsigned long linux_invalidate_mapping_pages(vm_object_t, pgoff_t, pgoff_t); struct page *linux_shmem_read_mapping_page_gfp(vm_object_t, int, gfp_t); Index: sys/compat/linuxkpi/common/include/linux/gfp.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/gfp.h +++ sys/compat/linuxkpi/common/include/linux/gfp.h @@ -55,6 +55,9 @@ #define __GFP_WAIT M_WAITOK #define __GFP_DMA32 (1U << 24) /* LinuxKPI only */ +#define __GFP_BITS_SHIFT 25 +#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) + #define GFP_NOWAIT M_NOWAIT #define GFP_ATOMIC (M_NOWAIT | M_USE_RESERVE) #define GFP_KERNEL M_WAITOK Index: sys/compat/linuxkpi/common/include/linux/interrupt.h 
=================================================================== --- sys/compat/linuxkpi/common/include/linux/interrupt.h +++ sys/compat/linuxkpi/common/include/linux/interrupt.h @@ -33,13 +33,13 @@ #include #include +#include #include #include -typedef irqreturn_t (*irq_handler_t)(int, void *); -#define IRQ_RETVAL(x) ((x) != IRQ_NONE) +typedef irqreturn_t (*irq_handler_t)(int, void *); #define IRQF_SHARED RF_SHAREABLE @@ -58,7 +58,7 @@ { if (irq == dev->irq) return (0); - return irq - dev->msix + 1; + return (irq - dev->msix + 1); } extern void linux_irq_handler(void *); @@ -87,7 +87,7 @@ dev = linux_pci_find_irq_dev(irq); if (dev == NULL) - return -ENXIO; + return (-ENXIO); rid = linux_irq_rid(dev, irq); res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid, flags | RF_ACTIVE); @@ -108,7 +108,43 @@ } list_add(&irqe->links, &dev->irqents); - return 0; + return (0); +} + +static inline void +enable_irq(unsigned int irq) +{ + struct irq_ent *irqe; + struct device *dev; + int error; + + dev = linux_pci_find_irq_dev(irq); + if (dev == NULL) + return; + irqe = linux_irq_ent(dev, irq); + if (irqe == NULL) + return; + error = bus_setup_intr(dev->bsddev, irqe->res, INTR_TYPE_NET | INTR_MPSAFE, + NULL, linux_irq_handler, irqe, &irqe->tag); + if (error) { + device_printf(dev->bsddev, "linuxkpi enable irq error\n"); + } +} + +static inline void +disable_irq(unsigned int irq) +{ + struct irq_ent *irqe; + struct device *dev; + + dev = linux_pci_find_irq_dev(irq); + if (dev == NULL) + return; + irqe = linux_irq_ent(dev, irq); + if (irqe == NULL) + return; + bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag); + irqe->tag = NULL; } static inline int @@ -142,7 +178,8 @@ irqe = linux_irq_ent(dev, irq); if (irqe == NULL) return; - bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag); + if (irqe->tag) + bus_teardown_intr(dev->bsddev, irqe->res, irqe->tag); bus_release_resource(dev->bsddev, SYS_RES_IRQ, rid, irqe->res); list_del(&irqe->links); kfree(irqe); @@ -169,4 +206,7 
@@ extern void tasklet_init(struct tasklet_struct *, tasklet_func_t *, unsigned long data); +extern void tasklet_enable(struct tasklet_struct *); +extern void tasklet_disable(struct tasklet_struct *); + #endif /* _LINUX_INTERRUPT_H_ */ Index: sys/compat/linuxkpi/common/include/linux/irqreturn.h =================================================================== --- /dev/null +++ sys/compat/linuxkpi/common/include/linux/irqreturn.h @@ -0,0 +1,40 @@ +/*- + * Copyright (c) 2017 Limelight Networks, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ +#ifndef _LINUX_IRQRETURN_NEXT_H +#define _LINUX_IRQRETURN_NEXT_H + +enum irqreturn { + IRQ_NONE = (0 << 0), + IRQ_HANDLED = (1 << 0), + IRQ_WAKE_THREAD = (1 << 1), +}; + +typedef enum irqreturn irqreturn_t; +#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE) + +#endif Index: sys/compat/linuxkpi/common/include/linux/kernel.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/kernel.h +++ sys/compat/linuxkpi/common/include/linux/kernel.h @@ -54,7 +54,7 @@ #include -#define KERN_CONT "" +#define KERN_CONT "" #define KERN_EMERG "<0>" #define KERN_ALERT "<1>" #define KERN_CRIT "<2>" @@ -89,6 +89,7 @@ #define BUILD_BUG_ON(x) CTASSERT(!(x)) #define BUILD_BUG_ON_MSG(x, msg) BUILD_BUG_ON(x) #define BUILD_BUG_ON_NOT_POWER_OF_2(x) BUILD_BUG_ON(!powerof2(x)) +#define BUILD_BUG_ON_INVALID(expr) ((void)(sizeof((__force long)(expr)))) #define BUG() panic("BUG at %s:%d", __FILE__, __LINE__) #define BUG_ON(cond) do { \ Index: sys/compat/linuxkpi/common/include/linux/kref.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/kref.h +++ sys/compat/linuxkpi/common/include/linux/kref.h @@ -52,6 +52,12 @@ refcount_init(&kref->refcount.counter, 1); } +static inline unsigned int +kref_read(const struct kref *kref) +{ + return (atomic_read(&kref->refcount)); +} + static inline void kref_get(struct kref *kref) { @@ -65,9 +71,9 @@ if (refcount_release(&kref->refcount.counter)) { rel(kref); - return 1; + return (1); } - return 0; + return (0); } static inline int @@ -78,17 +84,17 @@ while (count--) { if (refcount_release(&kref->refcount.counter)) { rel(kref); - return 1; + return (1); } } - return 0; + return (0); } static inline int __must_check kref_get_unless_zero(struct kref *kref) { - return atomic_add_unless(&kref->refcount, 1, 0); + return (atomic_add_unless(&kref->refcount, 1, 0)); } static inline int kref_put_mutex(struct kref *kref, @@ 
-99,12 +105,12 @@ mutex_lock(lock); if (unlikely(!atomic_dec_and_test(&kref->refcount))) { mutex_unlock(lock); - return 0; + return (0); } release(kref); - return 1; + return (1); } - return 0; + return (0); } #endif /* _LINUX_KREF_H_ */ Index: sys/compat/linuxkpi/common/include/linux/ktime.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/ktime.h +++ sys/compat/linuxkpi/common/include/linux/ktime.h @@ -37,11 +37,7 @@ #define ktime_get_ts(x) getnanouptime(x) /* time values in nanoseconds */ -union ktime { - int64_t tv64; -}; - -typedef union ktime ktime_t; +typedef s64 ktime_t; #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) @@ -49,40 +45,37 @@ static inline int64_t ktime_to_ns(ktime_t kt) { - return kt.tv64; + return (kt); } -static inline ktime_t +static inline s64 ns_to_ktime(uint64_t nsec) { - ktime_t kt; - - kt.tv64 = nsec; - return (kt); + return (nsec); } static inline int64_t ktime_divns(const ktime_t kt, int64_t div) { - return kt.tv64 / div; + return (kt / div); } static inline int64_t ktime_to_us(ktime_t kt) { - return ktime_divns(kt, NSEC_PER_USEC); + return (ktime_divns(kt, NSEC_PER_USEC)); } static inline int64_t ktime_to_ms(ktime_t kt) { - return ktime_divns(kt, NSEC_PER_MSEC); + return (ktime_divns(kt, NSEC_PER_MSEC)); } static inline struct timeval ktime_to_timeval(ktime_t kt) { - return ns_to_timeval(kt.tv64); + return (ns_to_timeval(kt)); } static inline ktime_t @@ -90,8 +83,8 @@ { ktime_t res; - res.tv64 = kt.tv64 + ns; - return kt; + res = kt + ns; + return (res); } static inline ktime_t @@ -99,8 +92,8 @@ { ktime_t res; - res.tv64 = kt.tv64 - ns; - return kt; + res = kt - ns; + return (res); } static inline ktime_t @@ -113,7 +106,7 @@ static inline ktime_t ktime_sub(ktime_t lhs, ktime_t rhs) { - lhs.tv64 -= rhs.tv64; + lhs -= rhs; return (lhs); } @@ -121,20 +114,20 @@ ktime_us_delta(later, earlier) { ktime_t diff = 
ktime_sub(later, earlier); - return ktime_to_us(diff); + return (ktime_to_us(diff)); } static inline int64_t ktime_ms_delta(ktime_t later, ktime_t earlier) { ktime_t diff = ktime_sub(later, earlier); - return ktime_to_ms(diff); + return (ktime_to_ms(diff)); } static inline ktime_t ktime_add(ktime_t lhs, ktime_t rhs) { - lhs.tv64 += rhs.tv64; + lhs += rhs; return (lhs); } @@ -150,9 +143,9 @@ return (ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC)); } -#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) -#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) -#define ktime_to_ns(kt) ((kt).tv64) +#define ktime_to_timespec(kt) ns_to_timespec((kt)) +#define ktime_to_timeval(kt) ns_to_timeval((kt)) +#define ktime_to_ns(kt) (kt) static inline int64_t ktime_get_ns(void) @@ -165,8 +158,6 @@ return (ktime_to_ns(kt)); } -#define ktime_get_raw_ns() ktime_get_ns() - static inline ktime_t ktime_get(void) { @@ -194,4 +185,25 @@ return (timespec_to_ktime(ts)); } +static inline ktime_t +ktime_get_real_seconds(void) +{ + struct timespec ts; + ktime_t kt; + + nanotime(&ts); + kt = ts.tv_sec; + return (kt); +} + +static inline u64 +ktime_get_raw_ns(void) +{ + struct timespec ts; + + nanouptime(&ts); + + return ((ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec); +} + #endif /* _LINUX_KTIME_H */ Index: sys/compat/linuxkpi/common/include/linux/list.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/list.h +++ sys/compat/linuxkpi/common/include/linux/list.h @@ -70,13 +70,17 @@ #include #include +#ifdef DDB +#include +#endif + #ifndef prefetch #define prefetch(x) #endif -#define LINUX_LIST_HEAD_INIT(name) { &(name), &(name) } +#define LINUX_LIST_HEAD_INIT(name) { &(name), &(name) } -#define LINUX_LIST_HEAD(name) \ +#define LINUX_LIST_HEAD(name) \ struct list_head name = LINUX_LIST_HEAD_INIT(name) #ifndef LIST_HEAD_DEF @@ -116,6 +120,13 @@ WRITE_ONCE(prev->next, next); } +static inline void +__list_del_entry(struct list_head *entry) 
+{ + + __list_del(entry->prev, entry->next); +} + static inline void list_del(struct list_head *entry) { @@ -218,6 +229,9 @@ #define list_for_each_prev(p, h) for (p = (h)->prev; p != (h); p = (p)->prev) +#define list_safe_reset_next(pos, n, member) \ + n = list_entry((pos)->member.next, typeof(*pos), member) + static inline void list_add(struct list_head *new, struct list_head *head) { Index: sys/compat/linuxkpi/common/include/linux/lockdep.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/lockdep.h +++ sys/compat/linuxkpi/common/include/linux/lockdep.h @@ -32,6 +32,14 @@ #ifndef _LINUX_LOCKDEP_H_ #define _LINUX_LOCKDEP_H_ +/* + * In Linux lockdep_* functions are NOOP if CONFIG_LOCKDEP is not enabled. + * Since we do not fully support LOCKDEP yet, we do not enable CONFIG_LOCKDEP. + * However, for the functions that have implementation below, + * #if IS_ENABLED(CONFIG_LOCKDEP) found in Linux code can be overridden to + * provide some lock assertion. 
+ */ + struct lock_class_key { }; @@ -48,5 +56,11 @@ #define lockdep_is_held(m) (sx_xholder(&(m)->sx) == curthread) #define might_lock(m) do { } while (0) +#define might_lock_read(lock) do { } while (0) + +#define lock_acquire(l, s, t, r, c, n, i) do { } while (0) +#define lock_release(l, n, i) do { } while (0) +#define lock_acquire_shared_recursive(l, s, t, n, i) \ + lock_acquire(l, s, t, 2, 1, n, i) #endif /* _LINUX_LOCKDEP_H_ */ Index: sys/compat/linuxkpi/common/include/linux/mm.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/mm.h +++ sys/compat/linuxkpi/common/include/linux/mm.h @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -118,14 +119,15 @@ struct vm_fault { unsigned int flags; pgoff_t pgoff; - void *virtual_address; /* user-space address */ + unsigned long address; /* user-space address */ struct page *page; + struct vm_area_struct *vma; }; struct vm_operations_struct { void (*open) (struct vm_area_struct *); void (*close) (struct vm_area_struct *); - int (*fault) (struct vm_area_struct *, struct vm_fault *); + int (*fault) (struct vm_fault *); }; /* @@ -237,7 +239,7 @@ get_user_pages_remote(struct task_struct *, struct mm_struct *, unsigned long start, unsigned long nr_pages, int gup_flags, struct page **, - struct vm_area_struct **); + struct vm_area_struct **, int *locked); static inline void put_page(struct vm_page *page) Index: sys/compat/linuxkpi/common/include/linux/mm_types.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/mm_types.h +++ sys/compat/linuxkpi/common/include/linux/mm_types.h @@ -62,6 +62,12 @@ mmdrop(mm); } +static inline void +mmgrab(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_count); +} + extern struct mm_struct *linux_get_task_mm(struct task_struct *); #define get_task_mm(task) linux_get_task_mm(task) Index: sys/compat/linuxkpi/common/include/linux/mmzone.h 
=================================================================== --- /dev/null +++ sys/compat/linuxkpi/common/include/linux/mmzone.h @@ -0,0 +1,33 @@ +/*- + * Copyright (c) 2017 Limelight Networks, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ +#ifndef _LINUX_MMZONE_H_ +#define _LINUX_MMZONE_H_ + +#define MAX_ORDER 11 + +#endif Index: sys/compat/linuxkpi/common/include/linux/mutex.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/mutex.h +++ sys/compat/linuxkpi/common/include/linux/mutex.h @@ -128,4 +128,19 @@ sx_destroy(&m->sx); } -#endif /* _LINUX_MUTEX_H_ */ +enum mutex_trylock_recursive_enum { + MUTEX_TRYLOCK_FAILED = 0, + MUTEX_TRYLOCK_SUCCESS = 1, + MUTEX_TRYLOCK_RECURSIVE, +}; + +static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum +mutex_trylock_recursive(struct mutex *lock) +{ + if (unlikely(sx_xholder(&lock->sx) == curthread)) + return (MUTEX_TRYLOCK_RECURSIVE); + + return (mutex_trylock(lock)); +} + +#endif /* _LINUX_MUTEX_H_ */ Index: sys/compat/linuxkpi/common/include/linux/pid.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/pid.h +++ sys/compat/linuxkpi/common/include/linux/pid.h @@ -58,6 +58,11 @@ __ts; \ }) +#define get_task_pid(task, type) ({ \ + CTASSERT((type) == PIDTYPE_PID); \ + curthread->td_proc->p_pid; \ +}) + struct task_struct; extern struct task_struct *linux_pid_task(pid_t); extern struct task_struct *linux_get_pid_task(pid_t); Index: sys/compat/linuxkpi/common/include/linux/printk.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/printk.h +++ sys/compat/linuxkpi/common/include/linux/printk.h @@ -34,11 +34,12 @@ #include /* GID printing macros */ -#define GID_PRINT_FMT "%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x" -#define GID_PRINT_ARGS(gid_raw) htons(((u16 *)gid_raw)[0]), htons(((u16 *)gid_raw)[1]),\ - htons(((u16 *)gid_raw)[2]), htons(((u16 *)gid_raw)[3]),\ - htons(((u16 *)gid_raw)[4]), htons(((u16 *)gid_raw)[5]),\ - htons(((u16 *)gid_raw)[6]), htons(((u16 *)gid_raw)[7]) +#define GID_PRINT_FMT "%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x" 
+#define GID_PRINT_ARGS(gid_raw) \ + htons(((u16 *)gid_raw)[0]), htons(((u16 *)gid_raw)[1]), \ + htons(((u16 *)gid_raw)[2]), htons(((u16 *)gid_raw)[3]), \ + htons(((u16 *)gid_raw)[4]), htons(((u16 *)gid_raw)[5]), \ + htons(((u16 *)gid_raw)[6]), htons(((u16 *)gid_raw)[7]) enum { DUMP_PREFIX_NONE, @@ -106,10 +107,20 @@ print_hex_dump(NULL, prefix_str, prefix_type, 16, 1, buf, len, 0); } -#define printk_ratelimited(...) do { \ - static linux_ratelimit_t __ratelimited; \ - if (linux_ratelimited(&__ratelimited)) \ - printk(__VA_ARGS__); \ -} while (0) +static inline int +printk_ratelimit() { + // XXX: Used in amdgpu/gmc_v{6,7,8}_0.c + // Return 0 means no dev_err output. + return (1); +} + +#define printk_ratelimited(...) ({ \ + static linux_ratelimit_t __ratelimited; \ + int __retval; \ + __retval = linux_ratelimited(&__ratelimited); \ + if (__retval) \ + printk(__VA_ARGS__); \ + __retval; \ +}) #endif /* _LINUX_PRINTK_H_ */ Index: sys/compat/linuxkpi/common/include/linux/radix-tree.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/radix-tree.h +++ sys/compat/linuxkpi/common/include/linux/radix-tree.h @@ -32,6 +32,7 @@ #define _LINUX_RADIX_TREE_H_ #include +#include #define RADIX_TREE_MAP_SHIFT 6 #define RADIX_TREE_MAP_SIZE (1 << RADIX_TREE_MAP_SHIFT) @@ -39,9 +40,15 @@ #define RADIX_TREE_MAX_HEIGHT \ DIV_ROUND_UP((sizeof(long) * NBBY), RADIX_TREE_MAP_SHIFT) +#define RADIX_TREE_MAX_TAGS 3 +#define RADIX_TREE_TAG_LONGS \ + ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) + struct radix_tree_node { - void *slots[RADIX_TREE_MAP_SIZE]; - int count; + unsigned char shift; + void *slots[RADIX_TREE_MAP_SIZE]; + int count; + unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; struct radix_tree_root { Index: sys/compat/linuxkpi/common/include/linux/rcupdate.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/rcupdate.h +++ 
sys/compat/linuxkpi/common/include/linux/rcupdate.h @@ -82,6 +82,9 @@ #define rcu_dereference(p) \ rcu_dereference_protected(p, 0) +#define rcu_dereference_raw(p) \ + ((__typeof(*p) *)(p)) + #define rcu_pointer_handoff(p) (p) #define rcu_assign_pointer(p, v) do { \ Index: sys/compat/linuxkpi/common/include/linux/slab.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/slab.h +++ sys/compat/linuxkpi/common/include/linux/slab.h @@ -65,6 +65,11 @@ #define kmem_cache_free(...) linux_kmem_cache_free(__VA_ARGS__) #define kmem_cache_destroy(...) linux_kmem_cache_destroy(__VA_ARGS__) +#define KMEM_CACHE(__struct, __flags) \ + kmem_cache_create(#__struct, \ + sizeof(struct __struct), __alignof__(struct __struct), \ + (__flags), NULL) + typedef void linux_kmem_ctor_t (void *); struct linux_kmem_cache { Index: sys/compat/linuxkpi/common/include/linux/spinlock.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/spinlock.h +++ sys/compat/linuxkpi/common/include/linux/spinlock.h @@ -98,6 +98,9 @@ __ret; \ }) +#define spin_trylock_irq(_l) \ + spin_trylock(_l) + #define spin_lock_nested(_l, _n) do { \ if (SPIN_SKIP()) \ break; \ Index: sys/compat/linuxkpi/common/include/linux/string.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/string.h +++ sys/compat/linuxkpi/common/include/linux/string.h @@ -70,6 +70,22 @@ return (retval); } +static inline void * +memdup_user_nul(const void *ptr, size_t len) +{ + char *retval; + int error; + + retval = malloc(len + 1, M_KMALLOC, M_WAITOK); + error = linux_copyin(ptr, retval, len); + if (error != 0) { + free(retval, M_KMALLOC); + return (ERR_PTR(error)); + } + retval[len] = '\0'; + return (retval); +} + static inline void * kmemdup(const void *src, size_t len, gfp_t gfp) { Index: sys/compat/linuxkpi/common/include/linux/uaccess.h 
=================================================================== --- sys/compat/linuxkpi/common/include/linux/uaccess.h +++ sys/compat/linuxkpi/common/include/linux/uaccess.h @@ -58,7 +58,7 @@ linux_copyout(&(__x), (_p), sizeof(*(_p))); \ }) #define get_user(_x, _p) linux_copyin((_p), &(_x), sizeof(*(_p))) -#define put_user(_x, _p) linux_copyout(&(_x), (_p), sizeof(*(_p))) +#define put_user(_x, _p) __put_user(_x, _p) #define clear_user(...) linux_clear_user(__VA_ARGS__) #define access_ok(...) linux_access_ok(__VA_ARGS__) Index: sys/compat/linuxkpi/common/include/linux/wait.h =================================================================== --- sys/compat/linuxkpi/common/include/linux/wait.h +++ sys/compat/linuxkpi/common/include/linux/wait.h @@ -36,6 +36,7 @@ #include #include #include +#include #include @@ -96,11 +97,17 @@ }; \ MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF) -#define init_waitqueue_head(wqh) do { \ - mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"), \ - NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS); \ - INIT_LIST_HEAD(&(wqh)->task_list); \ -} while (0) +static inline void +__init_waitqueue_head(wait_queue_head_t *wqh, __unused const void *name, + __unused void *key) { + mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"), NULL, + MTX_DEF | MTX_NEW | MTX_NOWITNESS); + INIT_LIST_HEAD(&(wqh)->task_list); +} + +#define init_waitqueue_head(wqh) do { \ + __init_waitqueue_head(wqh, NULL, NULL); \ + } while (0) void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool); Index: sys/compat/linuxkpi/common/src/linux_compat.c =================================================================== --- sys/compat/linuxkpi/common/src/linux_compat.c +++ sys/compat/linuxkpi/common/src/linux_compat.c @@ -189,7 +189,7 @@ } if (error) sysfs_remove_dir(kobj); - + } return (error); } @@ -545,20 +545,21 @@ struct vm_fault vmf; /* fill out VM fault structure */ - vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); + vmf.address = 
(unsigned long)((uintptr_t)pidx << PAGE_SHIFT); vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; vmf.pgoff = 0; vmf.page = NULL; + vmf.vma = vmap; vmap->vm_pfn_count = 0; vmap->vm_pfn_pcount = &vmap->vm_pfn_count; vmap->vm_obj = vm_obj; - err = vmap->vm_ops->fault(vmap, &vmf); + err = vmap->vm_ops->fault(&vmf); while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { kern_yield(PRI_USER); - err = vmap->vm_ops->fault(vmap, &vmf); + err = vmap->vm_ops->fault(&vmf); } } @@ -1712,7 +1713,7 @@ { timer->expires = expires; - callout_reset(&timer->timer_callout, + callout_reset(&timer->timer_callout, linux_timer_jiffies_until(expires), &linux_timer_callback_wrapper, timer); } Index: sys/compat/linuxkpi/common/src/linux_hrtimer.c =================================================================== --- sys/compat/linuxkpi/common/src/linux_hrtimer.c +++ sys/compat/linuxkpi/common/src/linux_hrtimer.c @@ -98,7 +98,7 @@ { mtx_lock(&hrtimer->mtx); - callout_reset_sbt(&hrtimer->callout, nstosbt(time.tv64), nstosbt(nsec), + callout_reset_sbt(&hrtimer->callout, nstosbt(time), nstosbt(nsec), hrtimer_call_handler, hrtimer, 0); mtx_unlock(&hrtimer->mtx); } Index: sys/compat/linuxkpi/common/src/linux_page.c =================================================================== --- sys/compat/linuxkpi/common/src/linux_page.c +++ sys/compat/linuxkpi/common/src/linux_page.c @@ -267,7 +267,7 @@ long get_user_pages_remote(struct task_struct *task, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int gup_flags, - struct page **pages, struct vm_area_struct **vmas) + struct page **pages, struct vm_area_struct **vmas, int *locked) { vm_map_t map; Index: sys/compat/linuxkpi/common/src/linux_tasklet.c =================================================================== --- sys/compat/linuxkpi/common/src/linux_tasklet.c +++ sys/compat/linuxkpi/common/src/linux_tasklet.c @@ -41,6 +41,7 @@ #define TASKLET_ST_BUSY 1 #define TASKLET_ST_EXEC 2 #define TASKLET_ST_LOOP 3 
+#define TASKLET_ST_PAUSED 4 #define TASKLET_ST_CMPSET(ts, old, new) \ atomic_cmpset_ptr((volatile uintptr_t *)&(ts)->entry.tqe_prev, old, new) @@ -196,3 +197,21 @@ while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE) pause("W", 1); } + +void +tasklet_enable(struct tasklet_struct *ts) +{ + (void) TASKLET_ST_CMPSET(ts, TASKLET_ST_PAUSED, TASKLET_ST_IDLE); +} + +void +tasklet_disable(struct tasklet_struct *ts) +{ + while (1) { + if (TASKLET_ST_GET(ts) == TASKLET_ST_PAUSED) + break; + if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_PAUSED)) + break; + pause("W", 1); + } +}