Index: sys/compat/linuxkpi/common/include/linux/radix-tree.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/radix-tree.h
+++ sys/compat/linuxkpi/common/include/linux/radix-tree.h
@@ -78,6 +78,7 @@
 void	*radix_tree_lookup(struct radix_tree_root *, unsigned long);
 void	*radix_tree_delete(struct radix_tree_root *, unsigned long);
 int	radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
+int	radix_tree_store(struct radix_tree_root *, unsigned long, void **);
 bool	radix_tree_iter_find(struct radix_tree_root *, struct radix_tree_iter *, void ***);
 void	radix_tree_iter_delete(struct radix_tree_root *, struct radix_tree_iter *, void **);
Index: sys/compat/linuxkpi/common/include/linux/xarray.h
===================================================================
--- /dev/null
+++ sys/compat/linuxkpi/common/include/linux/xarray.h
@@ -0,0 +1,115 @@
+/*-
+ * Copyright (c) 2020 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_XARRAY_H_
+#define	_LINUX_XARRAY_H_
+
+#include <linux/gfp.h>
+
+#include <linux/radix-tree.h>
+#include <linux/err.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+
+#define	XA_LIMIT(min, max) \
+	({ CTASSERT((min) == 0); (uint32_t)(max); })
+
+#define	XA_FLAGS_ALLOC (1U << 0)
+#define	XA_FLAGS_LOCK_IRQ (1U << 1)
+
+#define	XA_ERROR(x) \
+	ERR_PTR(x)
+
+#define	xa_limit_32b XA_LIMIT(0, -1U)
+
+struct xarray {
+	struct radix_tree_root root;
+	spinlock_t spinlock;
+	atomic_t use_lock;
+};
+
+/*
+ * Extensible arrays API implemented as a wrapper
+ * around the radix tree implementation.
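+ *
+ * Note that only 32-bit indices starting at zero are supported;
+ * XA_LIMIT() asserts that the minimum is zero and truncates the
+ * maximum to 32 bits.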
+ */
+void xa_lock(struct xarray *);
+void xa_unlock(struct xarray *);
+void *xa_erase(struct xarray *, uint32_t);
+void *xa_load(struct xarray *, uint32_t);
+int xa_alloc(struct xarray *, uint32_t *, void *, uint32_t, gfp_t);
+int xa_alloc_cyclic(struct xarray *, uint32_t *, void *, uint32_t, uint32_t *, gfp_t);
+int xa_insert(struct xarray *, uint32_t, void *, gfp_t);
+void *xa_store(struct xarray *, uint32_t, void *, gfp_t);
+void xa_init_flags(struct xarray *, uint32_t);
+bool xa_empty(struct xarray *);
+void xa_destroy(struct xarray *);
+
+static inline void *
+__xa_for_each(struct xarray *xa, unsigned long *pindex, bool not_first)
+{
+	struct radix_tree_iter iter = { .index = *pindex };
+	void **ppslot;
+
+	if (not_first) {
+		/* advance to next index, if any */
+		iter.index++;
+		if (iter.index == 0)
+			return (NULL);	/* the index wrapped around */
+	}
+
+	if (radix_tree_iter_find(&xa->root, &iter, &ppslot)) {
+		*pindex = iter.index;
+		return (*ppslot);
+	} else {
+		return (NULL);
+	}
+}
+
+#define	xa_for_each(xa, index, entry) \
+	for ((entry) = NULL, (index) = 0; \
+	     ((entry) = __xa_for_each(xa, &index, (entry) != NULL)) != NULL; )
+
+/*
+ * Unlocked versions of the functions above. The current
+ * implementation detects locking automagically, so these simply map
+ * to their locked counterparts.
+ */
+#define	__xa_erase(...) xa_erase(__VA_ARGS__)
+#define	__xa_load(...) xa_load(__VA_ARGS__)
+#define	__xa_alloc(...) xa_alloc(__VA_ARGS__)
+#define	__xa_alloc_cyclic(...) xa_alloc_cyclic(__VA_ARGS__)
+#define	__xa_insert(...) xa_insert(__VA_ARGS__)
+#define	__xa_store(...) xa_store(__VA_ARGS__)
+#define	__xa_empty(...) xa_empty(__VA_ARGS__)
+
+static inline int
+xa_err(void *ptr)
+{
+	return (PTR_ERR_OR_ZERO(ptr));
+}
+
+#endif	/* _LINUX_XARRAY_H_ */
Index: sys/compat/linuxkpi/common/src/linux_radix.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_radix.c
+++ sys/compat/linuxkpi/common/src/linux_radix.c
@@ -262,3 +262,102 @@
 	return (0);
 }
+
+int
+radix_tree_store(struct radix_tree_root *root, unsigned long index, void **ppitem)
+{
+	struct radix_tree_node *node;
+	struct radix_tree_node *temp[RADIX_TREE_MAX_HEIGHT - 1];
+	void *pitem;
+	int height;
+	int idx;
+
+	/* check for deletion; storing NULL removes the entry, if any */
+	if (*ppitem == NULL) {
+		*ppitem = radix_tree_delete(root, index);
+		return (0);
+	}
+
+	/* get root node, if any */
+	node = root->rnode;
+
+	/* allocate root node, if needed */
+	if (node == NULL) {
+		node = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO);
+		if (node == NULL)
+			return (-ENOMEM);
+		root->rnode = node;
+		root->height++;
+	}
+
+	/* expand radix tree as needed */
+	while (radix_max(root) < index) {
+
+		/* check if the radix tree is getting too big */
+		if (root->height == RADIX_TREE_MAX_HEIGHT)
+			return (-E2BIG);
+
+		/*
+		 * If the root radix level is not empty, we need to
+		 * allocate a new radix level:
+		 */
+		if (node->count != 0) {
+			node = malloc(sizeof(*node), M_RADIX, root->gfp_mask | M_ZERO);
+			if (node == NULL)
+				return (-ENOMEM);
+			node->slots[0] = root->rnode;
+			node->count++;
+			root->rnode = node;
+		}
+		root->height++;
+	}
+
+	/* get radix tree height index */
+	height = root->height - 1;
+
+	/* walk down the tree until the first missing node, if any */
+	for ( ; height != 0; height--) {
+		idx = radix_pos(index, height);
+		if (node->slots[idx] == NULL)
+			break;
+		node = node->slots[idx];
+	}
+
+	/* allocate the missing radix levels, if any */
+	for (idx = 0; idx != height; idx++) {
+		temp[idx] = malloc(sizeof(*node), M_RADIX,
+		    root->gfp_mask | M_ZERO);
+		if (temp[idx] == NULL) {
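+			/* unwind: free the levels allocated so far */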
+			while (idx--)
+				free(temp[idx], M_RADIX);
+			/* Check if we should free the root node as well. */
+			if (root->rnode->count == 0) {
+				free(root->rnode, M_RADIX);
+				root->rnode = NULL;
+				root->height = 0;
+			}
+			return (-ENOMEM);
+		}
+	}
+
+	/* setup new radix levels, if any */
+	for ( ; height != 0; height--) {
+		idx = radix_pos(index, height);
+		node->slots[idx] = temp[height - 1];
+		node->count++;
+		node = node->slots[idx];
+	}
+
+	/*
+	 * Insert and adjust count if the item does not already exist.
+	 */
+	idx = radix_pos(index, 0);
+	/* swap the new item with the old one, if any */
+	pitem = node->slots[idx];
+	node->slots[idx] = *ppitem;
+	*ppitem = pitem;
+
+	if (pitem == NULL)
+		node->count++;
+	return (0);
+}
Index: sys/compat/linuxkpi/common/src/linux_xarray.c
===================================================================
--- /dev/null
+++ sys/compat/linuxkpi/common/src/linux_xarray.c
@@ -0,0 +1,289 @@
+/*-
+ * Copyright (c) 2020 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <linux/xarray.h>
+
+#include <vm/vm_pageout.h>
+
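+/*
+ * Implementation note: read-side functions enter an RCU read section
+ * and only take the spinlock while the use_lock counter shows an
+ * active writer. Writers bump use_lock and, on the 0->1 transition,
+ * call synchronize_rcu() before taking the spinlock, so that all
+ * lock-free readers have drained before the write proceeds.
+ */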
+void
+xa_lock(struct xarray *xa)
+{
+
+	MPASS(mtx_owned(&xa->spinlock.m) == false);
+
+	rcu_read_lock();
+	if (atomic_read(&xa->use_lock) != 0)
+		spin_lock(&xa->spinlock);
+}
+
+void
+xa_unlock(struct xarray *xa)
+{
+
+	if (mtx_owned(&xa->spinlock.m))
+		spin_unlock(&xa->spinlock);
+	rcu_read_unlock();
+
+	MPASS(mtx_owned(&xa->spinlock.m) == false);
+}
+
+static void
+xa_write_lock(struct xarray *xa)
+{
+
+	if (atomic_inc(&xa->use_lock) == 1)
+		synchronize_rcu();
+	spin_lock(&xa->spinlock);
+}
+
+static void
+xa_write_unlock(struct xarray *xa)
+{
+
+	spin_unlock(&xa->spinlock);
+	atomic_dec(&xa->use_lock);
+}
+
+void *
+xa_erase(struct xarray *xa, uint32_t index)
+{
+	bool is_locked = mtx_owned(&xa->spinlock.m);
+	void *retval;
+
+	if (likely(is_locked == false))
+		xa_write_lock(xa);
+	retval = radix_tree_delete(&xa->root, index);
+	if (likely(is_locked == false))
+		xa_write_unlock(xa);
+
+	return (retval);
+}
+
+void *
+xa_load(struct xarray *xa, uint32_t index)
+{
+	bool is_locked = mtx_owned(&xa->spinlock.m);
+	void *retval;
+
+	if (likely(is_locked == false))
+		xa_lock(xa);
+	retval = radix_tree_lookup(&xa->root, index);
+	if (likely(is_locked == false))
+		xa_unlock(xa);
+
+	return (retval);
+}
+
+static void
+xa_vm_wait(struct xarray *xa)
+{
+
+	/*
+	 * Make sure we keep all the state AS-IS, except for dropping
+	 * the spinlock while waiting for more memory:
+	 */
+	spin_unlock(&xa->spinlock);
+	vm_wait(NULL);
+	spin_lock(&xa->spinlock);
+}
+
+int
+xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
+{
+	bool is_locked = mtx_owned(&xa->spinlock.m);
+	int retval;
+
+	*pindex = 0;
+
+	if (likely(is_locked == false))
+		xa_write_lock(xa);
+retry:
+	retval = radix_tree_insert(&xa->root, *pindex, ptr);
+
+	switch (retval) {
+	case -EEXIST:
+		/* try the next index, up to and including "mask" */
+		if (likely(*pindex != mask)) {
+			(*pindex)++;
+			goto retry;
+		}
+		retval = -ENOMEM;
+		break;
+	case -ENOMEM:
+		if (likely(gfp & M_WAITOK)) {
+			xa_vm_wait(xa);
+			goto retry;
+		}
+		break;
+	default:
+		break;
+	}
+	if (likely(is_locked == false))
+		xa_write_unlock(xa);
+
+	return (retval);
+}
+
+int
+xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
+    uint32_t *pnext_index, gfp_t gfp)
+{
+	bool is_locked = mtx_owned(&xa->spinlock.m);
+	int retval;
+	int timeout = 1;
+
+	*pnext_index = 0;
+
+	if (likely(is_locked == false))
+		xa_write_lock(xa);
+retry:
+	retval = radix_tree_insert(&xa->root, *pnext_index, ptr);
+
+	switch (retval) {
+	case -EEXIST:
+		/* allow wrapping around the mask exactly once */
+		if (unlikely(*pnext_index == mask) && !timeout--) {
+			retval = -ENOMEM;
+			break;
+		}
+		(*pnext_index)++;
+		(*pnext_index) &= mask;
+		goto retry;
+	case -ENOMEM:
+		if (likely(gfp & M_WAITOK)) {
+			xa_vm_wait(xa);
+			goto retry;
+		}
+		break;
+	default:
+		break;
+	}
+	*pindex = *pnext_index;
+
+	if (likely(is_locked == false))
+		xa_write_unlock(xa);
+
+	return (retval);
+}
+
+int
+xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
+{
+	bool is_locked = mtx_owned(&xa->spinlock.m);
+	int retval;
+
+	if (likely(is_locked == false))
+		xa_write_lock(xa);
+retry:
+	retval = radix_tree_insert(&xa->root, index, ptr);
+
+	switch (retval) {
+	case -ENOMEM:
+		if (likely(gfp & M_WAITOK)) {
+			xa_vm_wait(xa);
+			goto retry;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (likely(is_locked == false))
+		xa_write_unlock(xa);
+
+	return (retval);
+}
+
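+/*
+ * This function stores a new value at the given index and returns the
+ * previous value, if any, or an XA_ERROR() pointer on failure.
+ */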
+void *
+xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
+{
+	bool is_locked = mtx_owned(&xa->spinlock.m);
+	int retval;
+
+	if (likely(is_locked == false))
+		xa_write_lock(xa);
+retry:
+	/* radix_tree_store() swaps the old value, if any, into "ptr" */
+	retval = radix_tree_store(&xa->root, index, &ptr);
+
+	switch (retval) {
+	case 0:
+		break;
+	case -ENOMEM:
+		if (likely(gfp & M_WAITOK)) {
+			xa_vm_wait(xa);
+			goto retry;
+		}
+		ptr = XA_ERROR(retval);
+		break;
+	default:
+		ptr = XA_ERROR(retval);
+		break;
+	}
+
+	if (likely(is_locked == false))
+		xa_write_unlock(xa);
+
+	return (ptr);
+}
+
+void
+xa_init_flags(struct xarray *xa, uint32_t flags)
+{
+
+	memset(xa, 0, sizeof(*xa));
+
+	spin_lock_init(&xa->spinlock);
+	/* the xarray flags are currently not used by this implementation */
+	xa->root.gfp_mask = GFP_NOWAIT;
+}
+
+void
+xa_destroy(struct xarray *xa)
+{
+	struct radix_tree_iter iter;
+	void **ppslot;
+
+	radix_tree_for_each_slot(ppslot, &xa->root, &iter, 0)
+		radix_tree_iter_delete(&xa->root, &iter, ppslot);
+	spin_lock_destroy(&xa->spinlock);
+}
+
+bool
+xa_empty(struct xarray *xa)
+{
+	bool is_locked = mtx_owned(&xa->spinlock.m);
+	struct radix_tree_iter iter = {};
+	void **temp;
+	bool found;
+
+	if (likely(is_locked == false))
+		xa_lock(xa);
+	found = radix_tree_iter_find(&xa->root, &iter, &temp);
+	if (likely(is_locked == false))
+		xa_unlock(xa);
+
+	return (!found);
+}
Index: sys/modules/linuxkpi/Makefile
===================================================================
--- sys/modules/linuxkpi/Makefile
+++ sys/modules/linuxkpi/Makefile
@@ -19,7 +19,9 @@
 	linux_shmemfs.c \
 	linux_idr.c \
 	linux_usb.c \
-	linux_work.c
+	linux_work.c \
+	linux_xarray.c
+
 SRCS+=	bus_if.h \
 	device_if.h \
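
A minimal usage sketch for review (hypothetical consumer code, not part
of the patch; the "item" variable and the error handling are made up):

	struct xarray xa;
	unsigned long index;
	uint32_t id;
	void *item = ...;	/* some pointer to store */
	void *entry;

	xa_init_flags(&xa, XA_FLAGS_ALLOC);

	/* allocate the lowest free 32-bit ID; may sleep with GFP_KERNEL */
	if (xa_alloc(&xa, &id, item, xa_limit_32b, GFP_KERNEL) != 0)
		return;

	entry = xa_load(&xa, id);	/* entry == item */

	xa_for_each(&xa, index, entry) {
		/* visits every (index, entry) pair */
	}

	xa_erase(&xa, id);	/* returns the removed item */
	xa_destroy(&xa);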