Index: head/sys/compat/linuxkpi/common/include/linux/io-mapping.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/io-mapping.h (revision 320195) +++ head/sys/compat/linuxkpi/common/include/linux/io-mapping.h (revision 320196) @@ -1,79 +1,112 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ -#ifndef _LINUX_IO_MAPPING_H_ + +#ifndef _LINUX_IO_MAPPING_H_ #define _LINUX_IO_MAPPING_H_ +#include <sys/types.h> +#include <machine/vm.h> + #include <linux/types.h> #include <linux/io.h> +#include <linux/slab.h> -struct io_mapping; +struct io_mapping { + unsigned long base; + unsigned long size; + void *mem; + vm_memattr_t attr; +}; static inline struct io_mapping * +io_mapping_init_wc(struct io_mapping *mapping, resource_size_t base, + unsigned long size) +{ + + mapping->base = base; + mapping->size = size; + mapping->mem = ioremap_wc(base, size); + mapping->attr = VM_MEMATTR_WRITE_COMBINING; + return (mapping); +} + +static inline struct io_mapping * io_mapping_create_wc(resource_size_t base, unsigned long size) { + struct io_mapping *mapping; - return ioremap_wc(base, size); + mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); + if (mapping == NULL) + return (NULL); + return (io_mapping_init_wc(mapping, base, size)); } static inline void +io_mapping_fini(struct io_mapping *mapping) +{ + + iounmap(mapping->mem); +} + +static inline void io_mapping_free(struct io_mapping *mapping) { - iounmap(mapping); + io_mapping_fini(mapping); + kfree(mapping); } static inline void * io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) { - return (((char *)mapping) + offset); + return ((char *)mapping->mem + offset); } static inline void io_mapping_unmap_atomic(void *vaddr) { - } static inline void * -io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset, + unsigned long size) { - return (((char *) mapping) + offset); + return ((char *)mapping->mem + offset); } static inline void io_mapping_unmap(void *vaddr) { - } -#endif /* _LINUX_IO_MAPPING_H_ */ +#endif /* _LINUX_IO_MAPPING_H_ */ Index: head/sys/dev/mlx4/mlx4_core/mlx4_pd.c =================================================================== --- head/sys/dev/mlx4/mlx4_core/mlx4_pd.c (revision 320195) +++ head/sys/dev/mlx4/mlx4_core/mlx4_pd.c (revision 320196) @@ -1,302
+1,302 @@ /* * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2005, 2014 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include #include #include #include #include "mlx4.h" #include "icm.h" enum { MLX4_NUM_RESERVED_UARS = 8 }; int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) { struct mlx4_priv *priv = mlx4_priv(dev); *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); if (*pdn == -1) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(mlx4_pd_alloc); void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) { mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR); } EXPORT_SYMBOL_GPL(mlx4_pd_free); int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) { struct mlx4_priv *priv = mlx4_priv(dev); *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap); if (*xrcdn == -1) return -ENOMEM; return 0; } int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) { u64 out_param; int err; if (mlx4_is_mfunc(dev)) { err = mlx4_cmd_imm(dev, 0, &out_param, RES_XRCD, RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) return err; *xrcdn = get_param_l(&out_param); return 0; } return __mlx4_xrcd_alloc(dev, xrcdn); } EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc); void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) { mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR); } void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) { u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, xrcdn); err = mlx4_cmd(dev, in_param, RES_XRCD, RES_OP_RESERVE, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn); } else __mlx4_xrcd_free(dev, xrcdn); } EXPORT_SYMBOL_GPL(mlx4_xrcd_free); int mlx4_init_pd_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, (1 << NOT_MASKED_PD_BITS) - 1, dev->caps.reserved_pds, 0); } void mlx4_cleanup_pd_table(struct mlx4_dev *dev) { mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); } int mlx4_init_xrcd_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); return 
mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16), (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0); } void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev) { mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap); } int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) { int offset; uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap); if (uar->index == -1) return -ENOMEM; if (mlx4_is_slave(dev)) offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) / dev->caps.uar_page_size); else offset = uar->index; uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset; uar->map = NULL; return 0; } EXPORT_SYMBOL_GPL(mlx4_uar_alloc); void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) { mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR); } EXPORT_SYMBOL_GPL(mlx4_uar_free); #ifndef CONFIG_PPC int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_uar *uar; int err = 0; int idx; if (!priv->bf_mapping) return -ENOMEM; mutex_lock(&priv->bf_mutex); if (!list_empty(&priv->bf_list)) uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list); else { if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) { err = -ENOMEM; goto out; } uar = kmalloc_node(sizeof *uar, GFP_KERNEL, node); if (!uar) { uar = kmalloc(sizeof *uar, GFP_KERNEL); if (!uar) { err = -ENOMEM; goto out; } } err = mlx4_uar_alloc(dev, uar); if (err) goto free_kmalloc; uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE); if (!uar->map) { err = -ENOMEM; goto free_uar; } - uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT, PAGE_SIZE); if (!uar->bf_map) { err = -ENOMEM; goto unamp_uar; } uar->free_bf_bmap = 0; list_add(&uar->bf_list, &priv->bf_list); } bf->uar = uar; idx = ffz(uar->free_bf_bmap); uar->free_bf_bmap |= 1 << idx; bf->uar = uar; bf->offset = 0; 
bf->buf_size = dev->caps.bf_reg_size / 2; bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size; if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1) list_del_init(&uar->bf_list); goto out; unamp_uar: bf->uar = NULL; iounmap(uar->map); free_uar: mlx4_uar_free(dev, uar); free_kmalloc: kfree(uar); out: mutex_unlock(&priv->bf_mutex); return err; } EXPORT_SYMBOL_GPL(mlx4_bf_alloc); void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf) { struct mlx4_priv *priv = mlx4_priv(dev); int idx; if (!bf->uar || !bf->uar->bf_map) return; mutex_lock(&priv->bf_mutex); idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size; bf->uar->free_bf_bmap &= ~(1 << idx); if (!bf->uar->free_bf_bmap) { if (!list_empty(&bf->uar->bf_list)) list_del(&bf->uar->bf_list); io_mapping_unmap(bf->uar->bf_map); iounmap(bf->uar->map); mlx4_uar_free(dev, bf->uar); kfree(bf->uar); } else if (list_empty(&bf->uar->bf_list)) list_add(&bf->uar->bf_list, &priv->bf_list); mutex_unlock(&priv->bf_mutex); } EXPORT_SYMBOL_GPL(mlx4_bf_free); #else int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) { memset(bf, 0, sizeof *bf); return -ENOSYS; } EXPORT_SYMBOL_GPL(mlx4_bf_alloc); void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf) { return; } EXPORT_SYMBOL_GPL(mlx4_bf_free); #endif int mlx4_init_uar_table(struct mlx4_dev *dev) { if (dev->caps.num_uars <= 128) { mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", dev->caps.num_uars); mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); return -ENODEV; } return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, dev->caps.num_uars, dev->caps.num_uars - 1, dev->caps.reserved_uars, 0); } void mlx4_cleanup_uar_table(struct mlx4_dev *dev) { mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap); } Index: head/sys/dev/mlx5/mlx5_core/mlx5_uar.c =================================================================== --- head/sys/dev/mlx5/mlx5_core/mlx5_uar.c (revision 320195) +++ head/sys/dev/mlx5/mlx5_core/mlx5_uar.c 
(revision 320196) @@ -1,209 +1,210 @@ /*- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include "mlx5_core.h" int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) { u32 in[MLX5_ST_SZ_DW(alloc_uar_in)]; u32 out[MLX5_ST_SZ_DW(alloc_uar_out)]; int err; memset(in, 0, sizeof(in)); MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR); memset(out, 0, sizeof(out)); err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; *uarn = MLX5_GET(alloc_uar_out, out, uar); return 0; } EXPORT_SYMBOL(mlx5_cmd_alloc_uar); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) { u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)]; u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)]; memset(in, 0, sizeof(in)); MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR); MLX5_SET(dealloc_uar_in, in, uar, uarn); memset(out, 0, sizeof(out)); return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_cmd_free_uar); static int need_uuar_lock(int uuarn) { int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS) return 0; return 1; } int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) { int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; struct mlx5_bf *bf; phys_addr_t addr; int err; int i; uuari->num_uars = NUM_DRIVER_UARS; uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; mutex_init(&uuari->lock); uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), GFP_KERNEL); uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); for (i = 0; i < uuari->num_uars; i++) { err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); if (err) goto out_count; addr = pci_resource_start(dev->pdev, 0) + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); uuari->uars[i].map = ioremap(addr, PAGE_SIZE); if (!uuari->uars[i].map) { 
mlx5_cmd_free_uar(dev, uuari->uars[i].index); err = -ENOMEM; goto out_count; } mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", uuari->uars[i].index, uuari->uars[i].map); } for (i = 0; i < tot_uuars; i++) { bf = &uuari->bfs[i]; bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2; bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; bf->reg = NULL; /* Add WC support */ bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + MLX5_BF_OFFSET; bf->need_lock = need_uuar_lock(i); spin_lock_init(&bf->lock); spin_lock_init(&bf->lock32); bf->uuarn = i; } return 0; out_count: for (i--; i >= 0; i--) { iounmap(uuari->uars[i].map); mlx5_cmd_free_uar(dev, uuari->uars[i].index); } kfree(uuari->count); kfree(uuari->bitmap); kfree(uuari->bfs); kfree(uuari->uars); return err; } int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) { int i = uuari->num_uars; for (i--; i >= 0; i--) { iounmap(uuari->uars[i].map); mlx5_cmd_free_uar(dev, uuari->uars[i].index); } kfree(uuari->count); kfree(uuari->bitmap); kfree(uuari->bfs); kfree(uuari->uars); return 0; } int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) { phys_addr_t pfn; phys_addr_t uar_bar_start; int err; err = mlx5_cmd_alloc_uar(mdev, &uar->index); if (err) { mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); return err; } uar_bar_start = pci_resource_start(mdev->pdev, 0); pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); if (!uar->map) { mlx5_core_warn(mdev, "ioremap() failed, %d\n", err); err = -ENOMEM; goto err_free_uar; } if (mdev->priv.bf_mapping) uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping, - uar->index << PAGE_SHIFT); + uar->index << PAGE_SHIFT, + PAGE_SIZE); return 0; err_free_uar: mlx5_cmd_free_uar(mdev, uar->index); return err; } EXPORT_SYMBOL(mlx5_alloc_map_uar); void mlx5_unmap_free_uar(struct 
mlx5_core_dev *mdev, struct mlx5_uar *uar) { io_mapping_unmap(uar->bf_map); iounmap(uar->map); mlx5_cmd_free_uar(mdev, uar->index); } EXPORT_SYMBOL(mlx5_unmap_free_uar);