Index: sys/compat/linuxkpi/common/include/asm/atomic.h
===================================================================
--- sys/compat/linuxkpi/common/include/asm/atomic.h
+++ sys/compat/linuxkpi/common/include/asm/atomic.h
@@ -159,36 +159,43 @@
 	return (ret);
 }
 
-#define	cmpxchg(ptr, old, new) ({					\
-	__typeof(*(ptr)) __ret;						\
-									\
-	CTASSERT(sizeof(__ret) == 1 || sizeof(__ret) == 2 ||		\
-	    sizeof(__ret) == 4 || sizeof(__ret) == 8);			\
-									\
-	__ret = (old);							\
-	switch (sizeof(__ret)) {					\
-	case 1:								\
-		while (!atomic_fcmpset_8((volatile int8_t *)(ptr),	\
-		    (int8_t *)&__ret, (new)) && __ret == (old))		\
-			;						\
-		break;							\
-	case 2:								\
-		while (!atomic_fcmpset_16((volatile int16_t *)(ptr),	\
-		    (int16_t *)&__ret, (new)) && __ret == (old))	\
-			;						\
-		break;							\
-	case 4:								\
-		while (!atomic_fcmpset_32((volatile int32_t *)(ptr),	\
-		    (int32_t *)&__ret, (new)) && __ret == (old))	\
-			;						\
-		break;							\
-	case 8:								\
-		while (!atomic_fcmpset_64((volatile int64_t *)(ptr),	\
-		    (int64_t *)&__ret, (new)) && __ret == (old))	\
-			;						\
-		break;							\
-	}								\
-	__ret;								\
+#define	cmpxchg(ptr, old, new) ({					\
+	union {								\
+		__typeof(*(ptr)) val;					\
+		u8 u8[0];						\
+		u16 u16[0];						\
+		u32 u32[0];						\
+		u64 u64[0];						\
+	} __ret = { .val = (old) }, __new = { .val = (new) };		\
+									\
+	CTASSERT(sizeof(__ret) == 1 || sizeof(__ret) == 2 ||		\
+	    sizeof(__ret) == 4 || sizeof(__ret) == 8);			\
+									\
+	switch (sizeof(__ret.val)) {					\
+	case 1:								\
+		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
+		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
+			;						\
+		break;							\
+	case 2:								\
+		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
+		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
+			;						\
+		break;							\
+	case 4:								\
+		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
+		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
+			;						\
+		break;							\
+	case 8:								\
+		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
+		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
+			;						\
+		break;							\
+	default:							\
+		break;							\
+	}								\
+	__ret.val;							\
 })
 
 #define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

Index: sys/compat/linuxkpi/common/include/linux/compiler.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/compiler.h
+++ sys/compat/linuxkpi/common/include/linux/compiler.h
@@ -56,6 +56,8 @@
 #define	__devexit
 #define	__exit
 #define	__rcu
+#define	__percpu
+#define	__weak		__attribute__((weak))
 #define	__malloc
 #define	___stringify(...)	#__VA_ARGS__
 #define	__stringify(...)	___stringify(__VA_ARGS__)
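For reference, the reworked cmpxchg() keeps the Linux return convention: the macro evaluates to the value observed before the exchange, so success is detected by comparing against the expected value. A minimal sketch of a caller (the function and variable names are illustrative, not part of this change):

/*
 * Sketch: claim a lock word iff it is still 0.  cmpxchg() returns the
 * prior value, so success means we saw the expected 0.
 */
static uint32_t lockword;

static int
try_claim(void)
{
	return (cmpxchg(&lockword, 0, 1) == 0);
}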
Index: sys/compat/linuxkpi/common/include/linux/dcache.h
===================================================================
--- /dev/null
+++ sys/compat/linuxkpi/common/include/linux/dcache.h
@@ -0,0 +1,41 @@
+#ifndef __LINUX_DCACHE_H
+#define	__LINUX_DCACHE_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+struct inode;
+struct dentry;
+struct path;
+struct pfs_node;
+
+#define	HASH_LEN_DECLARE	u32 hash; u32 len
+
+struct qstr {
+	union {
+		struct {
+			HASH_LEN_DECLARE;
+		};
+		u64 hash_len;
+	};
+	const unsigned char *name;
+};
+
+struct dentry {
+	struct inode *d_inode;
+	struct dentry *d_parent;	/* parent directory */
+	struct qstr d_name;
+	/* FreeBSD */
+	struct pfs_node *d_pfs_node;
+};
+
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+	return dentry->d_inode;
+}
+
+#endif	/* __LINUX_DCACHE_H */

Index: sys/compat/linuxkpi/common/include/linux/device.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/device.h
+++ sys/compat/linuxkpi/common/include/linux/device.h
@@ -46,9 +46,6 @@
 
 #include
 
-enum irqreturn	{ IRQ_NONE = 0, IRQ_HANDLED, IRQ_WAKE_THREAD, };
-typedef enum irqreturn	irqreturn_t;
-
 struct device;
 struct fwnode_handle;
 
@@ -184,7 +181,7 @@
 #define	dev_notice(dev, fmt, ...)	device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
 #define	dev_dbg(dev, fmt, ...)		do { } while (0)
 #define	dev_printk(lvl, dev, fmt, ...)					\
-	device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+	device_printf(((const struct device *)dev)->bsddev, fmt, ##__VA_ARGS__)
 
 #define	dev_err_ratelimited(dev, ...) do {	\
 	static linux_ratelimit_t __ratelimited;	\

Index: sys/compat/linuxkpi/common/include/linux/fs.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/fs.h
+++ sys/compat/linuxkpi/common/include/linux/fs.h
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include <linux/dcache.h>
 
 struct module;
 struct kiocb;
@@ -65,11 +66,6 @@
 
 typedef struct files_struct *fl_owner_t;
 
-struct dentry {
-	struct inode	*d_inode;
-	struct pfs_node	*d_pfs_node;
-};
-
 struct file_operations;
 
 struct linux_file_wait_queue {
@@ -288,6 +284,18 @@
 	return (file->_file->f_offset);
 }
 
+static inline struct inode *
+file_inode(const struct file *f)
+{
+	return f->f_inode;
+}
+
+static inline int
+call_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return file->f_op->mmap(file, vma);
+}
+
 /* Shared memory support */
 unsigned long linux_invalidate_mapping_pages(vm_object_t, pgoff_t, pgoff_t);
 struct page *linux_shmem_read_mapping_page_gfp(vm_object_t, int, gfp_t);

Index: sys/compat/linuxkpi/common/include/linux/gfp.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/gfp.h
+++ sys/compat/linuxkpi/common/include/linux/gfp.h
@@ -55,6 +55,9 @@
 #define	__GFP_WAIT	M_WAITOK
 #define	__GFP_DMA32	(1U << 24)	/* LinuxKPI only */
 
+#define	__GFP_BITS_SHIFT 25
+#define	__GFP_BITS_MASK	((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
 #define	GFP_NOWAIT	M_NOWAIT
 #define	GFP_ATOMIC	(M_NOWAIT | M_USE_RESERVE)
 #define	GFP_KERNEL	M_WAITOK

Index: sys/compat/linuxkpi/common/include/linux/hardirq.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/hardirq.h
+++ sys/compat/linuxkpi/common/include/linux/hardirq.h
@@ -38,5 +38,7 @@
 #include
 
 #define	synchronize_irq(irq)	_intr_drain((irq))
+#define	disable_irq(irq)	do { } while (0)
+#define	enable_irq(irq)		do { } while (0)
 
 #endif /* _LINUX_HARDIRQ_H_ */
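The new file_inode() and call_mmap() helpers let ported code stop reaching into struct file internals directly. A hypothetical consumer, sketched under the assumption that the file's f_op table is populated:

/*
 * Sketch only: 'map_backing_file' is an illustrative name, not part
 * of this change.
 */
static int
map_backing_file(struct file *filp, struct vm_area_struct *vma)
{
	if (file_inode(filp) == NULL || filp->f_op->mmap == NULL)
		return (-ENODEV);
	return (call_mmap(filp, vma));	/* dispatches filp->f_op->mmap */
}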
Index: sys/compat/linuxkpi/common/include/linux/interrupt.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/interrupt.h
+++ sys/compat/linuxkpi/common/include/linux/interrupt.h
@@ -33,13 +33,13 @@
 
 #include
 #include
+#include <linux/irqreturn.h>
 
 #include
 #include
 
-typedef	irqreturn_t	(*irq_handler_t)(int, void *);
-
-#define	IRQ_RETVAL(x)	((x) != IRQ_NONE)
+typedef	irqreturn_t	(*irq_handler_t)(int, void *);
 
 #define	IRQF_SHARED	RF_SHAREABLE
 
@@ -169,4 +169,7 @@
 extern void tasklet_init(struct tasklet_struct *, tasklet_func_t *,
     unsigned long data);
 
+extern void tasklet_enable(struct tasklet_struct *);
+extern void tasklet_disable(struct tasklet_struct *);
+
 #endif	/* _LINUX_INTERRUPT_H_ */

Index: sys/compat/linuxkpi/common/include/linux/irqreturn.h
===================================================================
--- /dev/null
+++ sys/compat/linuxkpi/common/include/linux/irqreturn.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_IRQRETURN_NEXT_H
+#define	_LINUX_IRQRETURN_NEXT_H
+
+enum irqreturn {
+	IRQ_NONE	= (0 << 0),
+	IRQ_HANDLED	= (1 << 0),
+	IRQ_WAKE_THREAD	= (1 << 1),
+};
+
+typedef enum irqreturn irqreturn_t;
+
+#define	IRQ_RETVAL(x)	((x) ? IRQ_HANDLED : IRQ_NONE)
+
+#endif	/* _LINUX_IRQRETURN_NEXT_H */

Index: sys/compat/linuxkpi/common/include/linux/kernel.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/kernel.h
+++ sys/compat/linuxkpi/common/include/linux/kernel.h
@@ -89,6 +89,7 @@
 #define	BUILD_BUG_ON(x)			CTASSERT(!(x))
 #define	BUILD_BUG_ON_MSG(x, msg)	BUILD_BUG_ON(x)
 #define	BUILD_BUG_ON_NOT_POWER_OF_2(x)	BUILD_BUG_ON(!powerof2(x))
+#define	BUILD_BUG_ON_INVALID(expr)	((void)(sizeof((__force long)(expr))))
 
 #define	BUG()	panic("BUG at %s:%d", __FILE__, __LINE__)
 #define	BUG_ON(cond) do {				\

Index: sys/compat/linuxkpi/common/include/linux/kref.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/kref.h
+++ sys/compat/linuxkpi/common/include/linux/kref.h
@@ -52,6 +52,12 @@
 	refcount_init(&kref->refcount.counter, 1);
 }
 
+static inline unsigned int
+kref_read(const struct kref *kref)
+{
+	return atomic_read(&kref->refcount);
+}
+
 static inline void
 kref_get(struct kref *kref)
 {

Index: sys/compat/linuxkpi/common/include/linux/ktime.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/ktime.h
+++ sys/compat/linuxkpi/common/include/linux/ktime.h
@@ -37,11 +37,7 @@
 #define	ktime_get_ts(x)	getnanouptime(x)
 
 /* time values in nanoseconds */
-union ktime {
-	int64_t	tv64;
-};
-
-typedef union ktime ktime_t;
+typedef s64 ktime_t;
 
 #define	KTIME_MAX	((s64)~((u64)1 << 63))
 #define	KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)
@@ -49,22 +45,19 @@
 static inline int64_t
 ktime_to_ns(ktime_t kt)
 {
-	return kt.tv64;
+	return kt;
 }
 
-static inline ktime_t
+static inline s64
 ns_to_ktime(uint64_t nsec)
 {
-	ktime_t kt;
-
-	kt.tv64 = nsec;
-	return (kt);
+	return (nsec);
 }
 
 static inline int64_t
 ktime_divns(const ktime_t kt, int64_t div)
 {
-	return kt.tv64 / div;
+	return kt / div;
 }
 
 static inline int64_t
@@ -82,7 +75,7 @@
 static inline struct timeval
 ktime_to_timeval(ktime_t kt)
 {
-	return ns_to_timeval(kt.tv64);
+	return ns_to_timeval(kt);
 }
 
 static inline ktime_t
@@ -90,7 +83,7 @@
 {
 	ktime_t res;
 
-	res.tv64 = kt.tv64 + ns;
-	return kt;
+	res = kt + ns;
+	return (res);
 }
 
@@ -99,7 +92,7 @@
 {
 	ktime_t res;
 
-	res.tv64 = kt.tv64 - ns;
-	return kt;
+	res = kt - ns;
+	return (res);
 }
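The ktime.h hunks above and below all fall out of the same change: ktime_t becomes a plain s64 holding nanoseconds, so the .tv64 member goes away and the arithmetic helpers reduce to integer math. A rough sketch of caller-side use after the change (illustrative only):

static void
measure(void)
{
	ktime_t start, elapsed;

	start = ktime_get();
	DELAY(1000);				/* placeholder work */
	elapsed = ktime_sub(ktime_get(), start);
	printf("took %lld ns\n", (long long)ktime_to_ns(elapsed));
}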
@@ -113,7 +106,7 @@
 static inline ktime_t
 ktime_sub(ktime_t lhs, ktime_t rhs)
 {
-	lhs.tv64 -= rhs.tv64;
+	lhs -= rhs;
 	return (lhs);
 }
 
@@ -134,7 +127,7 @@
 static inline ktime_t
 ktime_add(ktime_t lhs, ktime_t rhs)
 {
-	lhs.tv64 += rhs.tv64;
+	lhs += rhs;
 	return (lhs);
 }
 
@@ -150,9 +143,9 @@
 	return (ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC));
 }
 
-#define	ktime_to_timespec(kt)	ns_to_timespec((kt).tv64)
-#define	ktime_to_timeval(kt)	ns_to_timeval((kt).tv64)
-#define	ktime_to_ns(kt)		((kt).tv64)
+#define	ktime_to_timespec(kt)	ns_to_timespec((kt))
+#define	ktime_to_timeval(kt)	ns_to_timeval((kt))
+#define	ktime_to_ns(kt)		(kt)
 
 static inline int64_t
 ktime_get_ns(void)
@@ -165,8 +158,6 @@
 	return (ktime_to_ns(kt));
 }
 
-#define	ktime_get_raw_ns()	ktime_get_ns()
-
 static inline ktime_t
 ktime_get(void)
 {
@@ -194,4 +185,25 @@
 	return (timespec_to_ktime(ts));
 }
 
+static inline ktime_t
+ktime_get_real_seconds(void)
+{
+	struct timespec ts;
+	ktime_t kt;
+
+	nanotime(&ts);
+	kt = ts.tv_sec;
+	return (kt);
+}
+
+static inline u64
+ktime_get_raw_ns(void)
+{
+	struct timespec ts;
+
+	nanouptime(&ts);
+
+	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
+}
+
 #endif /* _LINUX_KTIME_H */

Index: sys/compat/linuxkpi/common/include/linux/list.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/list.h
+++ sys/compat/linuxkpi/common/include/linux/list.h
@@ -70,6 +70,10 @@
 #include
 #include
 
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
 #ifndef prefetch
 #define	prefetch(x)
 #endif
@@ -116,6 +120,15 @@
 	WRITE_ONCE(prev->next, next);
 }
 
+static inline void
+__list_del_entry(struct list_head *entry)
+{
+	/* No __list_del_entry_valid() sanity check in the LinuxKPI port. */
+	__list_del(entry->prev, entry->next);
+}
+
 static inline void
 list_del(struct list_head *entry)
 {
@@ -218,6 +231,9 @@
 
 #define	list_for_each_prev(p, h) for (p = (h)->prev; p != (h); p = (p)->prev)
 
+#define	list_safe_reset_next(pos, n, member)				\
+	n = list_entry(pos->member.next, typeof(*pos), member)
+
 static inline void
 list_add(struct list_head *new, struct list_head *head)
 {

Index: sys/compat/linuxkpi/common/include/linux/lockdep.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/lockdep.h
+++ sys/compat/linuxkpi/common/include/linux/lockdep.h
@@ -47,6 +47,13 @@
 
 #define	lockdep_is_held(m)	(sx_xholder(&(m)->sx) == curthread)
 
+/* XXX: experimental (johalun 20170913) */
+#define	lock_is_held(m)		lockdep_is_held(m)
+
 #define	might_lock(m)		do { } while (0)
+#define	might_lock_read(lock)	do { } while (0)
+
+#define	lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
+#define	lock_release(l, n, i)			do { } while (0)
 
 #endif /* _LINUX_LOCKDEP_H_ */

Index: sys/compat/linuxkpi/common/include/linux/mm.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/mm.h
+++ sys/compat/linuxkpi/common/include/linux/mm.h
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include <linux/mmzone.h>
 #include
 #include
 
@@ -118,14 +119,15 @@
 struct vm_fault {
 	unsigned int flags;
 	pgoff_t pgoff;
-	void *virtual_address;	/* user-space address */
+	unsigned long address;	/* user-space address */
 	struct page *page;
+	struct vm_area_struct *vma;
 };
 
 struct vm_operations_struct {
 	void	(*open) (struct vm_area_struct *);
 	void	(*close) (struct vm_area_struct *);
-	int	(*fault) (struct vm_area_struct *, struct vm_fault *);
+	int	(*fault) (struct vm_fault *);
 };
 
 /*
@@ -237,7 +239,7 @@
 get_user_pages_remote(struct task_struct *, struct mm_struct *,
     unsigned long start, unsigned long nr_pages, int gup_flags, struct page **,
-    struct vm_area_struct **);
+    struct vm_area_struct **, int *locked);
 
 static inline void
 put_page(struct vm_page *page)
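The vm_operations_struct fault() signature now matches post-4.11 Linux: the VMA travels inside struct vm_fault, and the faulting user address is an unsigned long. A hypothetical handler showing the new shape (names are illustrative):

static int
demo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;	/* formerly the first argument */
	unsigned long addr = vmf->address;	/* was void *virtual_address */

	/* ...look up and insert the backing pfn for 'addr' here... */
	(void)vma;
	(void)addr;
	return (VM_FAULT_NOPAGE);
}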
Index: sys/compat/linuxkpi/common/include/linux/mm_types.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/mm_types.h
+++ sys/compat/linuxkpi/common/include/linux/mm_types.h
@@ -62,6 +62,12 @@
 	mmdrop(mm);
 }
 
+static inline void
+mmgrab(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_count);
+}
+
 extern struct mm_struct *linux_get_task_mm(struct task_struct *);
 #define	get_task_mm(task)	linux_get_task_mm(task)

Index: sys/compat/linuxkpi/common/include/linux/mmzone.h
===================================================================
--- /dev/null
+++ sys/compat/linuxkpi/common/include/linux/mmzone.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_MMZONE_H_
+#define	_LINUX_MMZONE_H_
+
+/* Needed by i915_gem_internal.c */
+#define	MAX_ORDER	11
+
+#endif	/* _LINUX_MMZONE_H_ */

Index: sys/compat/linuxkpi/common/include/linux/mutex.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/mutex.h
+++ sys/compat/linuxkpi/common/include/linux/mutex.h
@@ -128,4 +128,19 @@
 	sx_destroy(&m->sx);
 }
 
-#endif /* _LINUX_MUTEX_H_ */
+enum mutex_trylock_recursive_enum {
+	MUTEX_TRYLOCK_FAILED = 0,
+	MUTEX_TRYLOCK_SUCCESS = 1,
+	MUTEX_TRYLOCK_RECURSIVE,
+};
+
+static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+	if (unlikely(sx_xholder(&lock->sx) == curthread))
+		return MUTEX_TRYLOCK_RECURSIVE;
+
+	return mutex_trylock(lock);
+}
+
+#endif /* _LINUX_MUTEX_H_ */

Index: sys/compat/linuxkpi/common/include/linux/pid.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/pid.h
+++ sys/compat/linuxkpi/common/include/linux/pid.h
@@ -52,10 +52,15 @@
 })
 
 #define	get_pid_task(pid, type) ({			\
-	struct task_struct *__ts;				\
+	struct task_struct *__ts;			\
 	CTASSERT((type) == PIDTYPE_PID);		\
 	__ts = linux_get_pid_task(pid);			\
-	__ts;						\
+	__ts;						\
+})
+
+#define	get_task_pid(task, type) ({			\
+	CTASSERT((type) == PIDTYPE_PID);		\
+	curthread->td_tid;				\
 })
 
 struct task_struct;
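mutex_trylock_recursive(), added above, mirrors the Linux API: callers must distinguish the recursive case, where the current thread already holds the lock and must not unlock it again. A sketch of the intended pattern (names hypothetical):

static struct mutex dev_lock;

static int
do_work_locked(void)
{
	bool locked = false;

	switch (mutex_trylock_recursive(&dev_lock)) {
	case MUTEX_TRYLOCK_FAILED:
		return (-EBUSY);	/* contended by another thread */
	case MUTEX_TRYLOCK_RECURSIVE:
		break;			/* already ours: do not unlock */
	case MUTEX_TRYLOCK_SUCCESS:
		locked = true;		/* must unlock on the way out */
		break;
	}
	/* ...critical section... */
	if (locked)
		mutex_unlock(&dev_lock);
	return (0);
}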
Index: sys/compat/linuxkpi/common/include/linux/printk.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/printk.h
+++ sys/compat/linuxkpi/common/include/linux/printk.h
@@ -106,6 +106,13 @@
 	print_hex_dump(NULL, prefix_str, prefix_type, 16, 1, buf, len, 0);
 }
 
+static inline int
+printk_ratelimit(void)
+{
+	/* XXX: Used in amdgpu/gmc_v{6,7,8}_0.c.  Returning 0 would
+	 * suppress the caller's dev_err output, so always allow it. */
+	return (1);
+}
+
 #define	printk_ratelimited(...) do {		\
 	static linux_ratelimit_t __ratelimited;	\
 	if (linux_ratelimited(&__ratelimited))	\

Index: sys/compat/linuxkpi/common/include/linux/radix-tree.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/radix-tree.h
+++ sys/compat/linuxkpi/common/include/linux/radix-tree.h
@@ -32,6 +32,11 @@
 #define	_LINUX_RADIX_TREE_H_
 
 #include
+#include
+#include
+#include
+
+#define	RADIX_TREE_MAX_TAGS	3
 
 #define	RADIX_TREE_MAP_SHIFT	6
 #define	RADIX_TREE_MAP_SIZE	(1 << RADIX_TREE_MAP_SHIFT)
@@ -39,9 +44,14 @@
 #define	RADIX_TREE_MAX_HEIGHT \
 	DIV_ROUND_UP((sizeof(long) * NBBY), RADIX_TREE_MAP_SHIFT)
 
+#define	RADIX_TREE_TAG_LONGS \
+	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
 struct radix_tree_node {
+	unsigned char	shift;
 	void		*slots[RADIX_TREE_MAP_SIZE];
 	int		count;
+	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
 struct radix_tree_root {
@@ -61,4 +71,180 @@
 void	*radix_tree_delete(struct radix_tree_root *, unsigned long);
 int	radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
 
+/*
+ * Radix tree iterator implementation, ported from Linux.
+ * Review needed!
+ */
+
+#define	RADIX_TREE_ENTRY_MASK		3UL
+#define	RADIX_TREE_INTERNAL_NODE	1UL
+#define	RADIX_TREE_EXCEPTIONAL_ENTRY	2
+#define	RADIX_TREE_EXCEPTIONAL_SHIFT	2
+#define	ROOT_IS_IDR	((__force gfp_t)(1 << __GFP_BITS_SHIFT))
+#define	ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT + 1)
+
+struct radix_tree_iter {
+	unsigned long	index;
+	unsigned long	next_index;
+	unsigned long	tags;
+	struct radix_tree_node *node;
+#ifdef CONFIG_RADIX_TREE_MULTIORDER	/* Probably not using this? */
+	unsigned int	shift;
+#endif
+};
+
+enum {
+	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
+	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
+	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
+};
+
+static inline int radix_tree_exception(void *arg)
+{
+	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
+}
+
+static inline bool radix_tree_is_internal_node(void *ptr)
+{
+	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
+	    RADIX_TREE_INTERNAL_NODE;
+}
+
+static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
+{
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+	return iter->shift;
+#else
+	return 0;
+#endif
+}
+
+void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
+    struct radix_tree_iter *iter, unsigned flags);
+
+static __always_inline void __rcu **
+radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
+{
+	/*
+	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
+	 * in the case of a successful tagged chunk lookup. If the lookup was
+	 * unsuccessful or non-tagged then nobody cares about ->tags.
+	 *
+	 * Set index to zero to bypass next_index overflow protection.
+	 * See the comment in radix_tree_next_chunk() for details.
+	 */
+	iter->index = 0;
+	iter->next_index = start;
+	return NULL;
+}
+
+static inline void __rcu **
+radix_tree_iter_lookup(const struct radix_tree_root *root,
+    struct radix_tree_iter *iter, unsigned long index)
+{
+	radix_tree_iter_init(iter, index);
+	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
+}
+
+static inline void __rcu **
+radix_tree_iter_find(const struct radix_tree_root *root,
+    struct radix_tree_iter *iter, unsigned long index)
+{
+	radix_tree_iter_init(iter, index);
+	return radix_tree_next_chunk(root, iter, 0);
+}
+
+static inline __must_check
+void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+	iter->next_index = iter->index;
+	iter->tags = 0;
+	return NULL;
+}
+
+static inline unsigned long
+__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
+{
+	return iter->index + (slots << iter_shift(iter));
+}
+
+void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
+    struct radix_tree_iter *iter);
+
+static __always_inline long
+radix_tree_chunk_size(struct radix_tree_iter *iter)
+{
+	return (iter->next_index - iter->index) >> iter_shift(iter);
+}
+
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+void __rcu **__radix_tree_next_slot(void __rcu **slot,
+    struct radix_tree_iter *iter, unsigned flags);
+#else
+/* Can't happen without sibling entries, but the compiler can't tell that */
+static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
+    struct radix_tree_iter *iter, unsigned flags)
+{
+	return slot;
+}
+#endif
+
+static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
+    struct radix_tree_iter *iter, unsigned flags)
+{
+	if (flags & RADIX_TREE_ITER_TAGGED) {
+		iter->tags >>= 1;
+		if (unlikely(!iter->tags))
+			return NULL;
+		if (likely(iter->tags & 1ul)) {
+			iter->index = __radix_tree_iter_add(iter, 1);
+			slot++;
+			goto found;
+		}
+		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
+			unsigned offset = __ffs(iter->tags);
+
+			iter->tags >>= offset++;
+			iter->index = __radix_tree_iter_add(iter, offset);
+			slot += offset;
+			goto found;
+		}
+	} else {
+		long count = radix_tree_chunk_size(iter);
+
+		while (--count > 0) {
+			slot++;
+			iter->index = __radix_tree_iter_add(iter, 1);
+
+			if (likely(*slot))
+				goto found;
+			if (flags & RADIX_TREE_ITER_CONTIG) {
+				/* forbid switching to the next chunk */
+				iter->next_index = 0;
+				break;
+			}
+		}
+	}
+	return NULL;
+
+ found:
+	if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
+		return __radix_tree_next_slot(slot, iter, flags);
+	return slot;
+}
+
+#define	radix_tree_for_each_slot(slot, root, iter, start)		\
+	for (slot = radix_tree_iter_init(iter, start);			\
+	     slot || (slot = radix_tree_next_chunk(root, iter, 0));	\
+	     slot = radix_tree_next_slot(slot, iter, 0))
+
 #endif	/* _LINUX_RADIX_TREE_H_ */
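Typical use of the iterator API added above, assuming the tree was populated with radix_tree_insert(); the helper below is illustrative, not part of the change:

static void
dump_tree(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, root, &iter, 0)
		printf("index %lu -> %p\n", iter.index,
		    rcu_dereference_raw(*slot));
}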
Index: sys/compat/linuxkpi/common/include/linux/rcupdate.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/rcupdate.h
+++ sys/compat/linuxkpi/common/include/linux/rcupdate.h
@@ -82,6 +82,11 @@
 #define	rcu_dereference(p) \
 	rcu_dereference_protected(p, 0)
 
+/* Review needed!  Added rcu_dereference_raw; is this OK? */
+#define	rcu_dereference_raw(p) \
+	((__typeof(*p) *)(p))
+
 #define	rcu_pointer_handoff(p)	(p)
 
 #define	rcu_assign_pointer(p, v) do {	\

Index: sys/compat/linuxkpi/common/include/linux/slab.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/slab.h
+++ sys/compat/linuxkpi/common/include/linux/slab.h
@@ -65,6 +65,11 @@
 #define	kmem_cache_free(...)	linux_kmem_cache_free(__VA_ARGS__)
 #define	kmem_cache_destroy(...)	linux_kmem_cache_destroy(__VA_ARGS__)
 
+#define	KMEM_CACHE(__struct, __flags)					\
+	kmem_cache_create(#__struct,					\
+	    sizeof(struct __struct), __alignof__(struct __struct),	\
+	    (__flags), NULL)
+
 typedef void linux_kmem_ctor_t (void *);
 
 struct linux_kmem_cache {

Index: sys/compat/linuxkpi/common/include/linux/spinlock.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/spinlock.h
+++ sys/compat/linuxkpi/common/include/linux/spinlock.h
@@ -98,6 +98,9 @@
 	__ret;					\
 })
 
+#define	spin_trylock_irq(_l)			\
+	spin_trylock(_l)
+
 #define	spin_lock_nested(_l, _n) do {		\
 	if (SPIN_SKIP())			\
 		break;				\

Index: sys/compat/linuxkpi/common/include/linux/string.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/string.h
+++ sys/compat/linuxkpi/common/include/linux/string.h
@@ -70,6 +70,22 @@
 	return (retval);
 }
 
+static inline void *
+memdup_user_nul(const void *ptr, size_t len)
+{
+	char *retval;
+	int error;
+
+	retval = malloc(len + 1, M_KMALLOC, M_WAITOK);
+	error = linux_copyin(ptr, retval, len);
+	if (error != 0) {
+		free(retval, M_KMALLOC);
+		return (ERR_PTR(error));
+	}
+	retval[len] = '\0';
+	return (retval);
+}
+
 static inline void *
 kmemdup(const void *src, size_t len, gfp_t gfp)
 {

Index: sys/compat/linuxkpi/common/include/linux/uaccess.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/uaccess.h
+++ sys/compat/linuxkpi/common/include/linux/uaccess.h
@@ -58,7 +58,7 @@
 	linux_copyout(&(__x), (_p), sizeof(*(_p)));	\
 })
 #define	get_user(_x, _p)	linux_copyin((_p), &(_x), sizeof(*(_p)))
-#define	put_user(_x, _p)	linux_copyout(&(_x), (_p), sizeof(*(_p)))
+#define	put_user(_x, _p)	__put_user(_x, _p)
 #define	clear_user(...)		linux_clear_user(__VA_ARGS__)
 #define	access_ok(...)		linux_access_ok(__VA_ARGS__)
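memdup_user_nul() copies len bytes in from user space and guarantees NUL termination at retval[len], returning ERR_PTR() on copyin failure. A sketch of a caller (the helper name is hypothetical):

static char *
fetch_user_string(const char __user *uptr, size_t len)
{
	char *s = memdup_user_nul(uptr, len);

	if (IS_ERR(s))
		return (NULL);		/* copyin failed */
	/* s is guaranteed NUL-terminated at s[len] */
	return (s);
}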
Index: sys/compat/linuxkpi/common/include/linux/wait.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/wait.h
+++ sys/compat/linuxkpi/common/include/linux/wait.h
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -96,10 +97,14 @@
 	};								\
 	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)
 
-#define	init_waitqueue_head(wqh) do {					\
-	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),		\
-	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);			\
-	INIT_LIST_HEAD(&(wqh)->task_list);				\
+static inline void
+__init_waitqueue_head(wait_queue_head_t *wqh, __unused const void *name,
+    __unused void *key)
+{
+	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"), NULL,
+	    MTX_DEF | MTX_NEW | MTX_NOWITNESS);
+	INIT_LIST_HEAD(&(wqh)->task_list);
+}
+
+#define	init_waitqueue_head(wqh) do {					\
+	__init_waitqueue_head(wqh, NULL, NULL);				\
 } while (0)
 
 void	linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);
@@ -133,8 +138,10 @@
 									\
 	for (;;) {							\
 		linux_prepare_to_wait(&(wqh), &__wq, state);		\
-		if (cond)						\
+		if (cond) {						\
+			__ret = 1;					\
 			break;						\
+		}							\
 		__ret = linux_wait_event_common(&(wqh), &__wq,		\
 		    __timeout, state, lock);				\
 		if (__ret != 0)						\
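The __wait_event_common() change above fixes the case where the condition is already true on entry: the timeout variants now return nonzero instead of looking like a timeout. An illustrative caller (names hypothetical):

static int
wait_for_done(wait_queue_head_t *wq, volatile int *done)
{
	long ret;

	ret = wait_event_timeout(*wq, *done != 0, msecs_to_jiffies(100));
	if (ret == 0)
		return (-ETIMEDOUT);	/* condition still false */
	return (0);			/* true on entry now yields ret == 1 */
}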
Index: sys/compat/linuxkpi/common/src/linux_compat.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_compat.c
+++ sys/compat/linuxkpi/common/src/linux_compat.c
@@ -189,7 +189,7 @@
 		}
 		if (error)
 			sysfs_remove_dir(kobj);
-		
+
 	}
 	return (error);
 }
@@ -545,20 +545,21 @@
 		struct vm_fault vmf;
 
 		/* fill out VM fault structure */
-		vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT);
+		vmf.address = (unsigned long)((uintptr_t)pidx << PAGE_SHIFT);
 		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
 		vmf.pgoff = 0;
 		vmf.page = NULL;
+		vmf.vma = vmap;
 
 		vmap->vm_pfn_count = 0;
 		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
 		vmap->vm_obj = vm_obj;
 
-		err = vmap->vm_ops->fault(vmap, &vmf);
+		err = vmap->vm_ops->fault(&vmf);
 
 		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
 			kern_yield(PRI_USER);
-			err = vmap->vm_ops->fault(vmap, &vmf);
+			err = vmap->vm_ops->fault(&vmf);
 		}
 	}
 
@@ -1712,7 +1713,7 @@
 {
 	timer->expires = expires;
-	callout_reset(&timer->timer_callout,
+	callout_reset(&timer->timer_callout,
 	    linux_timer_jiffies_until(expires),
 	    &linux_timer_callback_wrapper, timer);
 }

Index: sys/compat/linuxkpi/common/src/linux_hrtimer.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_hrtimer.c
+++ sys/compat/linuxkpi/common/src/linux_hrtimer.c
@@ -98,7 +98,7 @@
 {
 
 	mtx_lock(&hrtimer->mtx);
-	callout_reset_sbt(&hrtimer->callout, nstosbt(time.tv64), nstosbt(nsec),
+	callout_reset_sbt(&hrtimer->callout, nstosbt(time), nstosbt(nsec),
 	    hrtimer_call_handler, hrtimer, 0);
 	mtx_unlock(&hrtimer->mtx);
 }

Index: sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_page.c
+++ sys/compat/linuxkpi/common/src/linux_page.c
@@ -267,7 +267,7 @@
 long
 get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
     unsigned long start, unsigned long nr_pages, int gup_flags,
-    struct page **pages, struct vm_area_struct **vmas)
+    struct page **pages, struct vm_area_struct **vmas, int *locked)
 {
 	vm_map_t map;
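get_user_pages_remote() grows the int *locked out-parameter to track newer Linux; the LinuxKPI implementation accepts it but currently ignores it. An illustrative call site (gup_flags of 0, names hypothetical):

static long
pin_remote_pages(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long npages, struct page **pages)
{
	int locked = 1;

	/* Trailing 'locked' argument is new; NULL is also accepted. */
	return (get_user_pages_remote(task, mm, start, npages, 0,
	    pages, NULL, &locked));
}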
Index: sys/compat/linuxkpi/common/src/linux_radix.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_radix.c
+++ sys/compat/linuxkpi/common/src/linux_radix.c
@@ -216,3 +216,250 @@
 
 	return (0);
 }
+
+/*
+ * Radix tree iterator implementation, ported from Linux.
+ * Review needed!
+ */
+
+static inline void __set_iter_shift(struct radix_tree_iter *iter,
+    unsigned int shift)
+{
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+	iter->shift = shift;
+#endif
+}
+
+static unsigned int iter_offset(const struct radix_tree_iter *iter)
+{
+	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
+}
+
+static inline struct radix_tree_node *entry_to_node(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
+}
+
+static inline unsigned long shift_maxindex(unsigned int shift)
+{
+	return (RADIX_TREE_MAP_SIZE << shift) - 1;
+}
+
+static inline unsigned long node_maxindex(const struct radix_tree_node *node)
+{
+	return shift_maxindex(node->shift);
+}
+
+static unsigned radix_tree_load_root(const struct radix_tree_root *root,
+    struct radix_tree_node **nodep, unsigned long *maxindex)
+{
+	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
+
+	*nodep = node;
+
+	if (likely(radix_tree_is_internal_node(node))) {
+		node = entry_to_node(node);
+		*maxindex = node_maxindex(node);
+		return node->shift + RADIX_TREE_MAP_SHIFT;
+	}
+
+	*maxindex = 0;
+	return 0;
+}
+
+static __always_inline unsigned long
+radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
+    unsigned long offset)
+{
+	const unsigned long *addr = node->tags[tag];
+
+	if (offset < RADIX_TREE_MAP_SIZE) {
+		unsigned long tmp;
+
+		addr += offset / BITS_PER_LONG;
+		tmp = *addr >> (offset % BITS_PER_LONG);
+		if (tmp)
+			return __ffs(tmp) + offset;
+		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
+		while (offset < RADIX_TREE_MAP_SIZE) {
+			tmp = *++addr;
+			if (tmp)
+				return __ffs(tmp) + offset;
+			offset += BITS_PER_LONG;
+		}
+	}
+	return RADIX_TREE_MAP_SIZE;
+}
+
+static inline void *node_to_entry(void *ptr)
+{
+	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
+}
+
+#define	RADIX_TREE_RETRY	node_to_entry(NULL)
+
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+/* Sibling slots point directly to another slot in the same node */
+static inline
+bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
+{
+	void __rcu **ptr = node;
+	return (parent->slots <= ptr) &&
+	    (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
+}
+#else
+static inline
+bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
+{
+	return false;
+}
+#endif
+
+/* Construct iter->tags bit-mask from node->tags[tag] array */
+static void set_iter_tags(struct radix_tree_iter *iter,
+    struct radix_tree_node *node, unsigned offset, unsigned tag)
+{
+	unsigned tag_long = offset / BITS_PER_LONG;
+	unsigned tag_bit = offset % BITS_PER_LONG;
+
+	if (!node) {
+		iter->tags = 1;
+		return;
+	}
+
+	iter->tags = node->tags[tag][tag_long] >> tag_bit;
+
+	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
+	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
+		/* Pick tags from next element */
+		if (tag_bit)
+			iter->tags |= node->tags[tag][tag_long + 1] <<
+			    (BITS_PER_LONG - tag_bit);
+		/* Clip chunk size, here only BITS_PER_LONG tags */
+		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
+	}
+}
+
+static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
+    int offset)
+{
+	return test_bit(offset, __DECONST(unsigned long *, node->tags[tag]));
+}
+
+static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
+{
+	return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
+}
+
+static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
+    struct radix_tree_node **nodep, unsigned long index)
+{
+	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
+	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
+
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+	if (radix_tree_is_internal_node(entry)) {
+		if (is_sibling_entry(parent, entry)) {
+			void __rcu **sibentry;
+			sibentry = (void __rcu **)entry_to_node(entry);
+			offset = get_slot_offset(parent, sibentry);
+			entry = rcu_dereference_raw(*sibentry);
+		}
+	}
+#endif
+
+	*nodep = (void *)entry;
+	return offset;
+}
+
+void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
+    struct radix_tree_iter *iter, unsigned flags)
+{
+	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
+	struct radix_tree_node *node, *child;
+	unsigned long index, offset, maxindex;
+
+	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
+		return NULL;
+
+	/*
+	 * Catch next_index overflow after ~0UL. iter->index never overflows
+	 * during iterating; it can be zero only at the beginning.
+	 * And we cannot overflow iter->next_index in a single step,
+	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
+	 *
+	 * This condition is also used by radix_tree_next_slot() to stop
+	 * contiguous iterating, and forbid switching to the next chunk.
+	 */
+	index = iter->next_index;
+	if (!index && iter->index)
+		return NULL;
+
+ restart:
+	radix_tree_load_root(root, &child, &maxindex);
+	if (index > maxindex)
+		return NULL;
+	if (!child)
+		return NULL;
+
+	if (!radix_tree_is_internal_node(child)) {
+		/* Single-slot tree */
+		iter->index = index;
+		iter->next_index = maxindex + 1;
+		iter->tags = 1;
+		iter->node = NULL;
+		__set_iter_shift(iter, 0);
+		return __DECONST(void __rcu **, &root->rnode);
+	}
+
+	do {
+		node = entry_to_node(child);
+		offset = radix_tree_descend(node, &child, index);
+
+		if ((flags & RADIX_TREE_ITER_TAGGED) ?
+		    !tag_get(node, tag, offset) : !child) {
+			/* Hole detected */
+			if (flags & RADIX_TREE_ITER_CONTIG)
+				return NULL;
+
+			if (flags & RADIX_TREE_ITER_TAGGED)
+				offset = radix_tree_find_next_bit(node, tag,
+				    offset + 1);
+			else
+				while (++offset < RADIX_TREE_MAP_SIZE) {
+					void *slot = rcu_dereference_raw(
+					    node->slots[offset]);
+					if (is_sibling_entry(node, slot))
+						continue;
+					if (slot)
+						break;
+				}
+			index &= ~node_maxindex(node);
+			index += offset << node->shift;
+			/* Overflow after ~0UL */
+			if (!index)
+				return NULL;
+			if (offset == RADIX_TREE_MAP_SIZE)
+				goto restart;
+			child = rcu_dereference_raw(node->slots[offset]);
+		}
+
+		if (!child)
+			goto restart;
+		if (child == RADIX_TREE_RETRY)
+			break;
+	} while (radix_tree_is_internal_node(child));
+
+	/* Update the iterator state */
+	iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
+	iter->next_index = (index | node_maxindex(node)) + 1;
+	iter->node = node;
+	__set_iter_shift(iter, node->shift);
+
+	if (flags & RADIX_TREE_ITER_TAGGED)
+		set_iter_tags(iter, node, offset, tag);
+
+	return node->slots + offset;
+}

Index: sys/compat/linuxkpi/common/src/linux_tasklet.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_tasklet.c
+++ sys/compat/linuxkpi/common/src/linux_tasklet.c
@@ -196,3 +196,15 @@
 	while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
 		pause("W", 1);
 }
+
+void
+tasklet_enable(struct tasklet_struct *ts)
+{
+	/* XXX: stub, tasklets cannot currently be disabled */
+	(void)ts;
+}
+
+void
+tasklet_disable(struct tasklet_struct *ts)
+{
+	/* XXX: stub, see tasklet_enable() */
+	(void)ts;
+}
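Since tasklet_enable()/tasklet_disable() are stubs, Linux code that relied on them for mutual exclusion still needs its own locking under LinuxKPI. A sketch of caller code that now at least compiles (names hypothetical):

static void demo_func(unsigned long data);
static struct tasklet_struct demo_tasklet;

static void
demo_reconfigure(void)
{
	tasklet_init(&demo_tasklet, demo_func, 0);
	tasklet_disable(&demo_tasklet);	/* no-op on FreeBSD for now */
	/* ...update state the handler reads, with separate locking... */
	tasklet_enable(&demo_tasklet);
	tasklet_schedule(&demo_tasklet);
}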