Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
Show All 39 Lines | |||||
#include <rdma/ib_user_verbs.h> | #include <rdma/ib_user_verbs.h> | ||||
#include <rdma/ib_addr.h> | #include <rdma/ib_addr.h> | ||||
#include <rdma/ib_cache.h> | #include <rdma/ib_cache.h> | ||||
#include <dev/mlx5/port.h> | #include <dev/mlx5/port.h> | ||||
#include <dev/mlx5/vport.h> | #include <dev/mlx5/vport.h> | ||||
#include <linux/list.h> | #include <linux/list.h> | ||||
#include <rdma/ib_smi.h> | #include <rdma/ib_smi.h> | ||||
#include <rdma/ib_umem.h> | #include <rdma/ib_umem.h> | ||||
#include <rdma/uverbs_ioctl.h> | |||||
#include <linux/in.h> | #include <linux/in.h> | ||||
#include <linux/etherdevice.h> | #include <linux/etherdevice.h> | ||||
#include <dev/mlx5/fs.h> | #include <dev/mlx5/fs.h> | ||||
#include "mlx5_ib.h" | #include "mlx5_ib.h" | ||||
#define DRIVER_NAME "mlx5ib" | #define DRIVER_NAME "mlx5ib" | ||||
#ifndef DRIVER_VERSION | #ifndef DRIVER_VERSION | ||||
#define DRIVER_VERSION "3.6.0" | #define DRIVER_VERSION "3.6.0" | ||||
▲ Show 20 Lines • Show All 1,045 Lines • ▼ Show 20 Lines | static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, | ||||
err = mlx5_set_port_caps(dev->mdev, port, tmp); | err = mlx5_set_port_caps(dev->mdev, port, tmp); | ||||
out: | out: | ||||
mutex_unlock(&dev->cap_mask_mutex); | mutex_unlock(&dev->cap_mask_mutex); | ||||
return err; | return err; | ||||
} | } | ||||
static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps) | |||||
{ | |||||
mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n", | |||||
caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n"); | |||||
} | |||||
static u16 calc_dynamic_bfregs(int uars_per_sys_page) | static u16 calc_dynamic_bfregs(int uars_per_sys_page) | ||||
{ | { | ||||
/* Large page with non 4k uar support might limit the dynamic size */ | /* Large page with non 4k uar support might limit the dynamic size */ | ||||
if (uars_per_sys_page == 1 && PAGE_SIZE > 4096) | if (uars_per_sys_page == 1 && PAGE_SIZE > 4096) | ||||
return MLX5_MIN_DYN_BFREGS; | return MLX5_MIN_DYN_BFREGS; | ||||
return MLX5_MAX_DYN_BFREGS; | return MLX5_MAX_DYN_BFREGS; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | static void deallocate_uars(struct mlx5_ib_dev *dev, | ||||
bfregi = &context->bfregi; | bfregi = &context->bfregi; | ||||
for (i = 0; i < bfregi->num_sys_pages; i++) | for (i = 0; i < bfregi->num_sys_pages; i++) | ||||
if (i < bfregi->num_static_sys_pages || | if (i < bfregi->num_static_sys_pages || | ||||
bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) | bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) | ||||
mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]); | mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]); | ||||
} | } | ||||
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn, | ||||
u16 uid) | |||||
{ | |||||
int err; | |||||
if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) | |||||
return 0; | |||||
err = mlx5_alloc_transport_domain(dev->mdev, tdn, uid); | |||||
if (err) | |||||
return err; | |||||
return 0; | |||||
} | |||||
static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn, | |||||
u16 uid) | |||||
{ | |||||
if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) | |||||
return; | |||||
mlx5_dealloc_transport_domain(dev->mdev, tdn, uid); | |||||
} | |||||
static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, | |||||
struct ib_udata *udata) | struct ib_udata *udata) | ||||
{ | { | ||||
struct ib_device *ibdev = uctx->device; | |||||
struct mlx5_ib_dev *dev = to_mdev(ibdev); | struct mlx5_ib_dev *dev = to_mdev(ibdev); | ||||
struct mlx5_ib_alloc_ucontext_req_v2 req = {}; | struct mlx5_ib_alloc_ucontext_req_v2 req = {}; | ||||
struct mlx5_ib_alloc_ucontext_resp resp = {}; | struct mlx5_ib_alloc_ucontext_resp resp = {}; | ||||
struct mlx5_ib_ucontext *context; | struct mlx5_ib_ucontext *context = to_mucontext(uctx); | ||||
struct mlx5_bfreg_info *bfregi; | struct mlx5_bfreg_info *bfregi; | ||||
int ver; | int ver; | ||||
int err; | int err; | ||||
size_t reqlen; | |||||
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, | size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, | ||||
max_cqe_version); | max_cqe_version); | ||||
bool lib_uar_4k; | bool lib_uar_4k; | ||||
bool lib_uar_dyn; | bool lib_uar_dyn; | ||||
if (!dev->ib_active) | if (!dev->ib_active) | ||||
return ERR_PTR(-EAGAIN); | return -EAGAIN; | ||||
if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr)) | if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) | ||||
return ERR_PTR(-EINVAL); | |||||
reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr); | |||||
if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) | |||||
ver = 0; | ver = 0; | ||||
else if (reqlen >= min_req_v2) | else if (udata->inlen >= min_req_v2) | ||||
ver = 2; | ver = 2; | ||||
else | else | ||||
return ERR_PTR(-EINVAL); | return -EINVAL; | ||||
err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req))); | err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); | ||||
if (err) | if (err) | ||||
return ERR_PTR(err); | return err; | ||||
if (req.flags) | if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX) | ||||
return ERR_PTR(-EINVAL); | return -EOPNOTSUPP; | ||||
if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) | if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) | ||||
return ERR_PTR(-EOPNOTSUPP); | return -EOPNOTSUPP; | ||||
req.total_num_bfregs = ALIGN(req.total_num_bfregs, | req.total_num_bfregs = ALIGN(req.total_num_bfregs, | ||||
MLX5_NON_FP_BFREGS_PER_UAR); | MLX5_NON_FP_BFREGS_PER_UAR); | ||||
if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) | if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) | ||||
return ERR_PTR(-EINVAL); | return -EINVAL; | ||||
if (reqlen > sizeof(req) && | |||||
!ib_is_udata_cleared(udata, sizeof(req), | |||||
reqlen - sizeof(req))) | |||||
return ERR_PTR(-EOPNOTSUPP); | |||||
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); | ||||
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) | if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) | ||||
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | ||||
resp.cache_line_size = cache_line_size(); | resp.cache_line_size = cache_line_size(); | ||||
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); | ||||
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); | ||||
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); | resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); | ||||
resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); | resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); | ||||
resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); | resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); | ||||
resp.cqe_version = min_t(__u8, | resp.cqe_version = min_t(__u8, | ||||
(__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), | (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), | ||||
req.max_cqe_version); | req.max_cqe_version); | ||||
resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ? | resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ? | ||||
MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT; | MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT; | ||||
resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? | resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? | ||||
MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1; | MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1; | ||||
resp.response_length = min(offsetof(typeof(resp), response_length) + | resp.response_length = min(offsetof(typeof(resp), response_length) + | ||||
sizeof(resp.response_length), udata->outlen); | sizeof(resp.response_length), udata->outlen); | ||||
context = kzalloc(sizeof(*context), GFP_KERNEL); | |||||
if (!context) | |||||
return ERR_PTR(-ENOMEM); | |||||
lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; | lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; | ||||
lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR; | lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR; | ||||
bfregi = &context->bfregi; | bfregi = &context->bfregi; | ||||
if (lib_uar_dyn) { | if (lib_uar_dyn) { | ||||
bfregi->lib_uar_dyn = lib_uar_dyn; | bfregi->lib_uar_dyn = lib_uar_dyn; | ||||
goto uar_done; | goto uar_done; | ||||
} | } | ||||
Show All 20 Lines | if (!bfregi->sys_pages) { | ||||
goto out_count; | goto out_count; | ||||
} | } | ||||
err = allocate_uars(dev, context); | err = allocate_uars(dev, context); | ||||
if (err) | if (err) | ||||
goto out_sys_pages; | goto out_sys_pages; | ||||
uar_done: | uar_done: | ||||
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { | |||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | err = mlx5_ib_devx_create(dev, true); | ||||
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range; | if (err < 0) | ||||
#endif | |||||
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) { | |||||
err = mlx5_alloc_transport_domain(dev->mdev, | |||||
&context->tdn); | |||||
if (err) | |||||
goto out_uars; | goto out_uars; | ||||
context->devx_uid = err; | |||||
} | } | ||||
INIT_LIST_HEAD(&context->vma_private_list); | err = mlx5_ib_alloc_transport_domain(dev, &context->tdn, | ||||
context->devx_uid); | |||||
if (err) | |||||
goto out_devx; | |||||
INIT_LIST_HEAD(&context->db_page_list); | INIT_LIST_HEAD(&context->db_page_list); | ||||
mutex_init(&context->db_page_mutex); | mutex_init(&context->db_page_mutex); | ||||
resp.tot_bfregs = lib_uar_dyn ? 0 : req.total_num_bfregs; | resp.tot_bfregs = lib_uar_dyn ? 0 : req.total_num_bfregs; | ||||
resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); | resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); | ||||
if (field_avail(typeof(resp), cqe_version, udata->outlen)) | if (field_avail(typeof(resp), cqe_version, udata->outlen)) | ||||
resp.response_length += sizeof(resp.cqe_version); | resp.response_length += sizeof(resp.cqe_version); | ||||
Show All 28 Lines | uar_done: | ||||
if (offsetofend(typeof(resp), num_dyn_bfregs) <= udata->outlen) { | if (offsetofend(typeof(resp), num_dyn_bfregs) <= udata->outlen) { | ||||
resp.num_dyn_bfregs = bfregi->num_dyn_bfregs; | resp.num_dyn_bfregs = bfregi->num_dyn_bfregs; | ||||
resp.response_length += sizeof(resp.num_dyn_bfregs); | resp.response_length += sizeof(resp.num_dyn_bfregs); | ||||
} | } | ||||
err = ib_copy_to_udata(udata, &resp, resp.response_length); | err = ib_copy_to_udata(udata, &resp, resp.response_length); | ||||
if (err) | if (err) | ||||
goto out_td; | goto out_mdev; | ||||
bfregi->ver = ver; | bfregi->ver = ver; | ||||
bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; | bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; | ||||
context->cqe_version = resp.cqe_version; | context->cqe_version = resp.cqe_version; | ||||
context->lib_caps = req.lib_caps; | |||||
print_lib_caps(dev, context->lib_caps); | |||||
return &context->ibucontext; | return 0; | ||||
out_td: | out_mdev: | ||||
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) | mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); | ||||
mlx5_dealloc_transport_domain(dev->mdev, context->tdn); | out_devx: | ||||
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) | |||||
mlx5_ib_devx_destroy(dev, context->devx_uid); | |||||
out_uars: | out_uars: | ||||
deallocate_uars(dev, context); | deallocate_uars(dev, context); | ||||
out_sys_pages: | out_sys_pages: | ||||
kfree(bfregi->sys_pages); | kfree(bfregi->sys_pages); | ||||
out_count: | out_count: | ||||
kfree(bfregi->count); | kfree(bfregi->count); | ||||
out_ctx: | out_ctx: | ||||
kfree(context); | return err; | ||||
return ERR_PTR(err); | |||||
} | } | ||||
static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) | static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) | ||||
{ | { | ||||
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); | struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); | ||||
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); | struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); | ||||
struct mlx5_bfreg_info *bfregi; | struct mlx5_bfreg_info *bfregi; | ||||
bfregi = &context->bfregi; | bfregi = &context->bfregi; | ||||
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) | mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); | ||||
mlx5_dealloc_transport_domain(dev->mdev, context->tdn); | |||||
if (context->devx_uid) | |||||
mlx5_ib_devx_destroy(dev, context->devx_uid); | |||||
deallocate_uars(dev, context); | deallocate_uars(dev, context); | ||||
kfree(bfregi->sys_pages); | kfree(bfregi->sys_pages); | ||||
kfree(bfregi->count); | kfree(bfregi->count); | ||||
kfree(context); | |||||
return 0; | |||||
} | } | ||||
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, | static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, | ||||
int uar_idx) | int uar_idx) | ||||
{ | { | ||||
int fw_uars_per_page; | int fw_uars_per_page; | ||||
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1; | fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1; | ||||
Show All 17 Lines | |||||
} | } | ||||
/* Index resides in an extra byte to enable larger values than 255 */ | /* Index resides in an extra byte to enable larger values than 255 */ | ||||
static int get_extended_index(unsigned long offset) | static int get_extended_index(unsigned long offset) | ||||
{ | { | ||||
return get_arg(offset) | ((offset >> 16) & 0xff) << 8; | return get_arg(offset) | ((offset >> 16) & 0xff) << 8; | ||||
} | } | ||||
static void mlx5_ib_vma_open(struct vm_area_struct *area) | |||||
{ | |||||
/* vma_open is called when a new VMA is created on top of our VMA. This | |||||
* is done through either mremap flow or split_vma (usually due to | |||||
* mlock, madvise, munmap, etc.) We do not support a clone of the VMA, | |||||
* as this VMA is strongly hardware related. Therefore we set the | |||||
* vm_ops of the newly created/cloned VMA to NULL, to prevent it from | |||||
* calling us again and trying to do incorrect actions. We assume that | |||||
* the original VMA size is exactly a single page, and therefore all | |||||
* "splitting" operation will not happen to it. | |||||
*/ | |||||
area->vm_ops = NULL; | |||||
} | |||||
static void mlx5_ib_vma_close(struct vm_area_struct *area) | |||||
{ | |||||
struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data; | |||||
/* It's guaranteed that all VMAs opened on a FD are closed before the | |||||
* file itself is closed, therefore no sync is needed with the regular | |||||
* closing flow. (e.g. mlx5 ib_dealloc_ucontext) | |||||
* However need a sync with accessing the vma as part of | |||||
* mlx5_ib_disassociate_ucontext. | |||||
* The close operation is usually called under mm->mmap_sem except when | |||||
* process is exiting. | |||||
* The exiting case is handled explicitly as part of | |||||
* mlx5_ib_disassociate_ucontext. | |||||
*/ | |||||
mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data; | |||||
/* setting the vma context pointer to null in the mlx5_ib driver's | |||||
* private data, to protect a race condition in | |||||
* mlx5_ib_disassociate_ucontext(). | |||||
*/ | |||||
mlx5_ib_vma_priv_data->vma = NULL; | |||||
list_del(&mlx5_ib_vma_priv_data->list); | |||||
kfree(mlx5_ib_vma_priv_data); | |||||
} | |||||
static const struct vm_operations_struct mlx5_ib_vm_ops = { | |||||
.open = mlx5_ib_vma_open, | |||||
.close = mlx5_ib_vma_close | |||||
}; | |||||
static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, | |||||
struct mlx5_ib_ucontext *ctx) | |||||
{ | |||||
struct mlx5_ib_vma_private_data *vma_prv; | |||||
struct list_head *vma_head = &ctx->vma_private_list; | |||||
vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL); | |||||
if (!vma_prv) | |||||
return -ENOMEM; | |||||
vma_prv->vma = vma; | |||||
vma->vm_private_data = vma_prv; | |||||
vma->vm_ops = &mlx5_ib_vm_ops; | |||||
list_add(&vma_prv->list, vma_head); | |||||
return 0; | |||||
} | |||||
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) | static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) | ||||
{ | { | ||||
int ret; | |||||
struct vm_area_struct *vma; | |||||
struct mlx5_ib_vma_private_data *vma_private, *n; | |||||
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); | |||||
struct task_struct *owning_process = NULL; | |||||
struct mm_struct *owning_mm = NULL; | |||||
owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID); | |||||
if (!owning_process) | |||||
return; | |||||
owning_mm = get_task_mm(owning_process); | |||||
if (!owning_mm) { | |||||
pr_info("no mm, disassociate ucontext is pending task termination\n"); | |||||
while (1) { | |||||
put_task_struct(owning_process); | |||||
usleep_range(1000, 2000); | |||||
owning_process = get_pid_task(ibcontext->tgid, | |||||
PIDTYPE_PID); | |||||
if (!owning_process || owning_process->task_thread-> | |||||
td_proc->p_state == PRS_ZOMBIE) { | |||||
pr_info("disassociate ucontext done, task was terminated\n"); | |||||
/* in case task was dead need to release the | |||||
* task struct. | |||||
*/ | |||||
if (owning_process) | |||||
put_task_struct(owning_process); | |||||
return; | |||||
} | } | ||||
} | |||||
} | |||||
/* need to protect from a race on closing the vma as part of | |||||
* mlx5_ib_vma_close. | |||||
*/ | |||||
down_write(&owning_mm->mmap_sem); | |||||
list_for_each_entry_safe(vma_private, n, &context->vma_private_list, | |||||
list) { | |||||
vma = vma_private->vma; | |||||
ret = zap_vma_ptes(vma, vma->vm_start, | |||||
PAGE_SIZE); | |||||
if (ret == -ENOTSUP) { | |||||
if (bootverbose) | |||||
WARN_ONCE( | |||||
"%s: zap_vma_ptes not implemented for unmanaged mappings", __func__); | |||||
} else { | |||||
WARN(ret, "%s: zap_vma_ptes failed, error %d", | |||||
__func__, -ret); | |||||
} | |||||
/* context going to be destroyed, should | |||||
* not access ops any more. | |||||
*/ | |||||
/* XXXKIB vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); */ | |||||
vma->vm_ops = NULL; | |||||
list_del(&vma_private->list); | |||||
kfree(vma_private); | |||||
} | |||||
up_write(&owning_mm->mmap_sem); | |||||
mmput(owning_mm); | |||||
put_task_struct(owning_process); | |||||
} | |||||
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) | static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) | ||||
{ | { | ||||
switch (cmd) { | switch (cmd) { | ||||
case MLX5_IB_MMAP_WC_PAGE: | case MLX5_IB_MMAP_WC_PAGE: | ||||
return "WC"; | return "WC"; | ||||
case MLX5_IB_MMAP_REGULAR_PAGE: | case MLX5_IB_MMAP_REGULAR_PAGE: | ||||
return "best effort WC"; | return "best effort WC"; | ||||
case MLX5_IB_MMAP_NC_PAGE: | case MLX5_IB_MMAP_NC_PAGE: | ||||
return "NC"; | return "NC"; | ||||
default: | default: | ||||
return NULL; | return NULL; | ||||
} | } | ||||
} | } | ||||
static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev, | |||||
struct vm_area_struct *vma, | |||||
struct mlx5_ib_ucontext *context) | |||||
{ | |||||
if ((vma->vm_end - vma->vm_start != PAGE_SIZE) || | |||||
!(vma->vm_flags & VM_SHARED)) | |||||
return -EINVAL; | |||||
if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1) | |||||
return -EOPNOTSUPP; | |||||
if (vma->vm_flags & (VM_WRITE | VM_EXEC)) | |||||
return -EPERM; | |||||
return -EOPNOTSUPP; | |||||
} | |||||
static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) | |||||
{ | |||||
struct mlx5_user_mmap_entry *mentry = to_mmmap(entry); | |||||
struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device); | |||||
switch (mentry->mmap_flag) { | |||||
case MLX5_IB_MMAP_TYPE_UAR_WC: | |||||
case MLX5_IB_MMAP_TYPE_UAR_NC: | |||||
mlx5_cmd_free_uar(dev->mdev, mentry->page_idx); | |||||
kfree(mentry); | |||||
break; | |||||
default: | |||||
WARN_ON(true); | |||||
} | |||||
} | |||||
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, | static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, | ||||
struct vm_area_struct *vma, | struct vm_area_struct *vma, | ||||
struct mlx5_ib_ucontext *context) | struct mlx5_ib_ucontext *context) | ||||
{ | { | ||||
struct mlx5_bfreg_info *bfregi = &context->bfregi; | struct mlx5_bfreg_info *bfregi = &context->bfregi; | ||||
int err; | int err; | ||||
unsigned long idx; | unsigned long idx; | ||||
phys_addr_t pfn; | phys_addr_t pfn; | ||||
pgprot_t prot; | pgprot_t prot; | ||||
u32 bfreg_dyn_idx = 0; | u32 bfreg_dyn_idx = 0; | ||||
u32 uar_index; | u32 uar_index; | ||||
int dyn_uar = (cmd == MLX5_IB_MMAP_WC_PAGE); | int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC); | ||||
int max_valid_idx = dyn_uar ? bfregi->num_sys_pages : | int max_valid_idx = dyn_uar ? bfregi->num_sys_pages : | ||||
bfregi->num_static_sys_pages; | bfregi->num_static_sys_pages; | ||||
if (bfregi->lib_uar_dyn) | if (bfregi->lib_uar_dyn) | ||||
return -EINVAL; | return -EINVAL; | ||||
if (vma->vm_end - vma->vm_start != PAGE_SIZE) | if (vma->vm_end - vma->vm_start != PAGE_SIZE) | ||||
return -EINVAL; | return -EINVAL; | ||||
if (dyn_uar) | if (dyn_uar) | ||||
idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages; | idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages; | ||||
else | else | ||||
idx = get_index(vma->vm_pgoff); | idx = get_index(vma->vm_pgoff); | ||||
if (idx >= max_valid_idx) { | if (idx >= max_valid_idx) { | ||||
mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n", | mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n", | ||||
idx, max_valid_idx); | idx, max_valid_idx); | ||||
return -EINVAL; | return -EINVAL; | ||||
} | } | ||||
switch (cmd) { | switch (cmd) { | ||||
case MLX5_IB_MMAP_WC_PAGE: | case MLX5_IB_MMAP_WC_PAGE: | ||||
case MLX5_IB_MMAP_ALLOC_WC: | |||||
case MLX5_IB_MMAP_REGULAR_PAGE: | case MLX5_IB_MMAP_REGULAR_PAGE: | ||||
/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ | /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ | ||||
prot = pgprot_writecombine(vma->vm_page_prot); | prot = pgprot_writecombine(vma->vm_page_prot); | ||||
break; | break; | ||||
case MLX5_IB_MMAP_NC_PAGE: | case MLX5_IB_MMAP_NC_PAGE: | ||||
prot = pgprot_noncached(vma->vm_page_prot); | prot = pgprot_noncached(vma->vm_page_prot); | ||||
break; | break; | ||||
default: | default: | ||||
Show All 31 Lines | if (dyn_uar) { | ||||
} | } | ||||
} else { | } else { | ||||
uar_index = bfregi->sys_pages[idx]; | uar_index = bfregi->sys_pages[idx]; | ||||
} | } | ||||
pfn = uar_index2pfn(dev, uar_index); | pfn = uar_index2pfn(dev, uar_index); | ||||
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); | mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); | ||||
vma->vm_page_prot = prot; | err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE, | ||||
err = io_remap_pfn_range(vma, vma->vm_start, pfn, | prot, NULL); | ||||
PAGE_SIZE, vma->vm_page_prot); | |||||
if (err) { | if (err) { | ||||
mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%llx, pfn=%pa, mmap_cmd=%s\n", | mlx5_ib_err(dev, | ||||
err, (unsigned long long)vma->vm_start, &pfn, mmap_cmd2str(cmd)); | "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n", | ||||
err, mmap_cmd2str(cmd)); | |||||
goto err; | goto err; | ||||
} | } | ||||
if (dyn_uar) | if (dyn_uar) | ||||
bfregi->sys_pages[idx] = uar_index; | bfregi->sys_pages[idx] = uar_index; | ||||
return mlx5_ib_set_vma_data(vma, context); | return 0; | ||||
err: | err: | ||||
if (!dyn_uar) | if (!dyn_uar) | ||||
return err; | return err; | ||||
mlx5_cmd_free_uar(dev->mdev, idx); | mlx5_cmd_free_uar(dev->mdev, idx); | ||||
free_bfreg: | free_bfreg: | ||||
mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); | mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); | ||||
return err; | return err; | ||||
} | } | ||||
static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma) | |||||
{ | |||||
unsigned long idx; | |||||
u8 command; | |||||
command = get_command(vma->vm_pgoff); | |||||
idx = get_extended_index(vma->vm_pgoff); | |||||
return (command << 16 | idx); | |||||
} | |||||
static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev, | |||||
struct vm_area_struct *vma, | |||||
struct ib_ucontext *ucontext) | |||||
{ | |||||
struct mlx5_user_mmap_entry *mentry; | |||||
struct rdma_user_mmap_entry *entry; | |||||
unsigned long pgoff; | |||||
pgprot_t prot; | |||||
phys_addr_t pfn; | |||||
int ret; | |||||
pgoff = mlx5_vma_to_pgoff(vma); | |||||
entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff); | |||||
if (!entry) | |||||
return -EINVAL; | |||||
mentry = to_mmmap(entry); | |||||
pfn = (mentry->address >> PAGE_SHIFT); | |||||
if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR || | |||||
mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC) | |||||
prot = pgprot_noncached(vma->vm_page_prot); | |||||
else | |||||
prot = pgprot_writecombine(vma->vm_page_prot); | |||||
ret = rdma_user_mmap_io(ucontext, vma, pfn, | |||||
entry->npages * PAGE_SIZE, | |||||
prot, | |||||
entry); | |||||
rdma_user_mmap_entry_put(&mentry->rdma_entry); | |||||
return ret; | |||||
} | |||||
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) | static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) | ||||
{ | { | ||||
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); | struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); | ||||
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); | struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); | ||||
unsigned long command; | unsigned long command; | ||||
phys_addr_t pfn; | phys_addr_t pfn; | ||||
command = get_command(vma->vm_pgoff); | command = get_command(vma->vm_pgoff); | ||||
switch (command) { | switch (command) { | ||||
case MLX5_IB_MMAP_WC_PAGE: | case MLX5_IB_MMAP_WC_PAGE: | ||||
case MLX5_IB_MMAP_ALLOC_WC: | |||||
if (!dev->wc_support) | |||||
return -EPERM; | |||||
/* FALLTHROUGH */ | |||||
case MLX5_IB_MMAP_NC_PAGE: | case MLX5_IB_MMAP_NC_PAGE: | ||||
case MLX5_IB_MMAP_REGULAR_PAGE: | case MLX5_IB_MMAP_REGULAR_PAGE: | ||||
return uar_mmap(dev, command, vma, context); | return uar_mmap(dev, command, vma, context); | ||||
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: | case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: | ||||
return -ENOSYS; | return -ENOSYS; | ||||
case MLX5_IB_MMAP_CORE_CLOCK: | case MLX5_IB_MMAP_CORE_CLOCK: | ||||
if (vma->vm_end - vma->vm_start != PAGE_SIZE) | if (vma->vm_end - vma->vm_start != PAGE_SIZE) | ||||
return -EINVAL; | return -EINVAL; | ||||
if (vma->vm_flags & VM_WRITE) | if (vma->vm_flags & VM_WRITE) | ||||
return -EPERM; | return -EPERM; | ||||
/* Don't expose to user-space information it shouldn't have */ | /* Don't expose to user-space information it shouldn't have */ | ||||
if (PAGE_SIZE > 4096) | if (PAGE_SIZE > 4096) | ||||
return -EOPNOTSUPP; | return -EOPNOTSUPP; | ||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | |||||
pfn = (dev->mdev->iseg_base + | pfn = (dev->mdev->iseg_base + | ||||
offsetof(struct mlx5_init_seg, internal_timer_h)) >> | offsetof(struct mlx5_init_seg, internal_timer_h)) >> | ||||
PAGE_SHIFT; | PAGE_SHIFT; | ||||
if (io_remap_pfn_range(vma, vma->vm_start, pfn, | return rdma_user_mmap_io(&context->ibucontext, vma, pfn, | ||||
PAGE_SIZE, vma->vm_page_prot)) | PAGE_SIZE, | ||||
return -EAGAIN; | pgprot_noncached(vma->vm_page_prot), | ||||
NULL); | |||||
case MLX5_IB_MMAP_CLOCK_INFO: | |||||
return mlx5_ib_mmap_clock_info_page(dev, vma, context); | |||||
mlx5_ib_dbg(dev, "mapped internal timer at 0x%llx, PA 0x%llx\n", | |||||
(unsigned long long)vma->vm_start, | |||||
(unsigned long long)pfn << PAGE_SHIFT); | |||||
break; | |||||
default: | default: | ||||
return -EINVAL; | return mlx5_ib_mmap_offset(dev, vma, ibcontext); | ||||
} | } | ||||
return 0; | return 0; | ||||
} | } | ||||
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, | static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) | ||||
struct ib_ucontext *context, | |||||
struct ib_udata *udata) | |||||
{ | { | ||||
struct mlx5_ib_pd *pd = to_mpd(ibpd); | |||||
struct ib_device *ibdev = ibpd->device; | |||||
struct mlx5_ib_alloc_pd_resp resp; | struct mlx5_ib_alloc_pd_resp resp; | ||||
struct mlx5_ib_pd *pd; | |||||
int err; | int err; | ||||
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( | |||||
udata, struct mlx5_ib_ucontext, ibucontext); | |||||
u16 uid = context ? context->devx_uid : 0; | |||||
pd = kmalloc(sizeof(*pd), GFP_KERNEL); | err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn, uid); | ||||
if (!pd) | if (err) | ||||
return ERR_PTR(-ENOMEM); | return (err); | ||||
err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn); | pd->uid = uid; | ||||
if (err) { | if (udata) { | ||||
kfree(pd); | |||||
return ERR_PTR(err); | |||||
} | |||||
if (context) { | |||||
resp.pdn = pd->pdn; | resp.pdn = pd->pdn; | ||||
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { | if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { | ||||
mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); | mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); | ||||
kfree(pd); | return -EFAULT; | ||||
return ERR_PTR(-EFAULT); | |||||
} | } | ||||
} | } | ||||
return &pd->ibpd; | return 0; | ||||
} | } | ||||
static int mlx5_ib_dealloc_pd(struct ib_pd *pd) | static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) | ||||
{ | { | ||||
struct mlx5_ib_dev *mdev = to_mdev(pd->device); | struct mlx5_ib_dev *mdev = to_mdev(pd->device); | ||||
struct mlx5_ib_pd *mpd = to_mpd(pd); | struct mlx5_ib_pd *mpd = to_mpd(pd); | ||||
mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); | mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); | ||||
kfree(mpd); | |||||
return 0; | |||||
} | } | ||||
enum { | enum { | ||||
MATCH_CRITERIA_ENABLE_OUTER_BIT, | MATCH_CRITERIA_ENABLE_OUTER_BIT, | ||||
MATCH_CRITERIA_ENABLE_MISC_BIT, | MATCH_CRITERIA_ENABLE_MISC_BIT, | ||||
MATCH_CRITERIA_ENABLE_INNER_BIT | MATCH_CRITERIA_ENABLE_INNER_BIT | ||||
}; | }; | ||||
▲ Show 20 Lines • Show All 573 Lines • ▼ Show 20 Lines | err_tx: | ||||
ft_rx->refcount--; | ft_rx->refcount--; | ||||
kfree(handler_rx); | kfree(handler_rx); | ||||
err: | err: | ||||
return ERR_PTR(err); | return ERR_PTR(err); | ||||
} | } | ||||
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, | static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, | ||||
struct ib_flow_attr *flow_attr, | struct ib_flow_attr *flow_attr, | ||||
int domain) | int domain, | ||||
struct ib_udata *udata) | |||||
{ | { | ||||
struct mlx5_ib_dev *dev = to_mdev(qp->device); | struct mlx5_ib_dev *dev = to_mdev(qp->device); | ||||
struct mlx5_ib_qp *mqp = to_mqp(qp); | struct mlx5_ib_qp *mqp = to_mqp(qp); | ||||
struct mlx5_ib_flow_handler *handler = NULL; | struct mlx5_ib_flow_handler *handler = NULL; | ||||
struct mlx5_flow_destination *dst = NULL; | struct mlx5_flow_destination *dst = NULL; | ||||
struct mlx5_ib_flow_prio *ft_prio_tx = NULL; | struct mlx5_ib_flow_prio *ft_prio_tx = NULL; | ||||
struct mlx5_ib_flow_prio *ft_prio; | struct mlx5_ib_flow_prio *ft_prio; | ||||
int err; | int err; | ||||
if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) | if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) | ||||
return ERR_PTR(-ENOSPC); | return ERR_PTR(-ENOSPC); | ||||
if (domain != IB_FLOW_DOMAIN_USER || | if (domain != IB_FLOW_DOMAIN_USER || | ||||
udata != NULL || | |||||
flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) || | flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) || | ||||
(flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)) | (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)) | ||||
return ERR_PTR(-EINVAL); | return ERR_PTR(-EINVAL); | ||||
dst = kzalloc(sizeof(*dst), GFP_KERNEL); | dst = kzalloc(sizeof(*dst), GFP_KERNEL); | ||||
if (!dst) | if (!dst) | ||||
return ERR_PTR(-ENOMEM); | return ERR_PTR(-ENOMEM); | ||||
▲ Show 20 Lines • Show All 346 Lines • ▼ Show 20 Lines | |||||
static void destroy_umrc_res(struct mlx5_ib_dev *dev) | static void destroy_umrc_res(struct mlx5_ib_dev *dev) | ||||
{ | { | ||||
int err; | int err; | ||||
err = mlx5_mr_cache_cleanup(dev); | err = mlx5_mr_cache_cleanup(dev); | ||||
if (err) | if (err) | ||||
mlx5_ib_warn(dev, "mr cache cleanup failed\n"); | mlx5_ib_warn(dev, "mr cache cleanup failed\n"); | ||||
mlx5_ib_destroy_qp(dev->umrc.qp); | if (dev->umrc.qp) | ||||
mlx5_ib_destroy_qp(dev->umrc.qp, NULL); | |||||
if (dev->umrc.cq) | |||||
ib_free_cq(dev->umrc.cq); | ib_free_cq(dev->umrc.cq); | ||||
if (dev->umrc.pd) | |||||
ib_dealloc_pd(dev->umrc.pd); | ib_dealloc_pd(dev->umrc.pd); | ||||
} | } | ||||
enum {
	/* Maximum number of outstanding work requests on the UMR QP. */
	MAX_UMR_WR = 128,
};
static int create_umr_res(struct mlx5_ib_dev *dev) | static int create_umr_res(struct mlx5_ib_dev *dev) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 82 Lines • ▼ Show 20 Lines | static int create_umr_res(struct mlx5_ib_dev *dev) | ||||
} | } | ||||
kfree(attr); | kfree(attr); | ||||
kfree(init_attr); | kfree(init_attr); | ||||
return 0; | return 0; | ||||
error_4: | error_4: | ||||
mlx5_ib_destroy_qp(qp); | mlx5_ib_destroy_qp(qp, NULL); | ||||
dev->umrc.qp = NULL; | |||||
error_3: | error_3: | ||||
ib_free_cq(cq); | ib_free_cq(cq); | ||||
dev->umrc.cq = NULL; | |||||
error_2: | error_2: | ||||
ib_dealloc_pd(pd); | ib_dealloc_pd(pd); | ||||
dev->umrc.pd = NULL; | |||||
error_0: | error_0: | ||||
kfree(attr); | kfree(attr); | ||||
kfree(init_attr); | kfree(init_attr); | ||||
return ret; | return ret; | ||||
} | } | ||||
static int create_dev_resources(struct mlx5_ib_resources *devr) | static int create_dev_resources(struct mlx5_ib_resources *devr) | ||||
{ | { | ||||
struct ib_srq_init_attr attr; | struct ib_srq_init_attr attr; | ||||
struct mlx5_ib_dev *dev; | struct mlx5_ib_dev *dev; | ||||
struct ib_device *ibdev; | |||||
struct ib_cq_init_attr cq_attr = {.cqe = 1}; | struct ib_cq_init_attr cq_attr = {.cqe = 1}; | ||||
int port; | int port; | ||||
int ret = 0; | int ret = 0; | ||||
dev = container_of(devr, struct mlx5_ib_dev, devr); | dev = container_of(devr, struct mlx5_ib_dev, devr); | ||||
ibdev = &dev->ib_dev; | |||||
mutex_init(&devr->mutex); | mutex_init(&devr->mutex); | ||||
devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); | devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd); | ||||
if (IS_ERR(devr->p0)) { | if (!devr->p0) | ||||
ret = PTR_ERR(devr->p0); | return -ENOMEM; | ||||
goto error0; | |||||
} | devr->p0->device = ibdev; | ||||
devr->p0->device = &dev->ib_dev; | |||||
devr->p0->uobject = NULL; | devr->p0->uobject = NULL; | ||||
atomic_set(&devr->p0->usecnt, 0); | atomic_set(&devr->p0->usecnt, 0); | ||||
devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); | ret = mlx5_ib_alloc_pd(devr->p0, NULL); | ||||
if (IS_ERR(devr->c0)) { | if (ret) | ||||
ret = PTR_ERR(devr->c0); | goto error0; | ||||
devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq); | |||||
if (!devr->c0) { | |||||
ret = -ENOMEM; | |||||
goto error1; | goto error1; | ||||
} | } | ||||
devr->c0->device = &dev->ib_dev; | devr->c0->device = &dev->ib_dev; | ||||
devr->c0->uobject = NULL; | |||||
devr->c0->comp_handler = NULL; | |||||
devr->c0->event_handler = NULL; | |||||
devr->c0->cq_context = NULL; | |||||
atomic_set(&devr->c0->usecnt, 0); | atomic_set(&devr->c0->usecnt, 0); | ||||
devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); | ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL); | ||||
if (ret) | |||||
goto err_create_cq; | |||||
devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); | |||||
if (IS_ERR(devr->x0)) { | if (IS_ERR(devr->x0)) { | ||||
ret = PTR_ERR(devr->x0); | ret = PTR_ERR(devr->x0); | ||||
goto error2; | goto error2; | ||||
} | } | ||||
devr->x0->device = &dev->ib_dev; | devr->x0->device = &dev->ib_dev; | ||||
devr->x0->inode = NULL; | devr->x0->inode = NULL; | ||||
atomic_set(&devr->x0->usecnt, 0); | atomic_set(&devr->x0->usecnt, 0); | ||||
mutex_init(&devr->x0->tgt_qp_mutex); | mutex_init(&devr->x0->tgt_qp_mutex); | ||||
INIT_LIST_HEAD(&devr->x0->tgt_qp_list); | INIT_LIST_HEAD(&devr->x0->tgt_qp_list); | ||||
devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); | devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); | ||||
if (IS_ERR(devr->x1)) { | if (IS_ERR(devr->x1)) { | ||||
ret = PTR_ERR(devr->x1); | ret = PTR_ERR(devr->x1); | ||||
goto error3; | goto error3; | ||||
} | } | ||||
devr->x1->device = &dev->ib_dev; | devr->x1->device = &dev->ib_dev; | ||||
devr->x1->inode = NULL; | devr->x1->inode = NULL; | ||||
atomic_set(&devr->x1->usecnt, 0); | atomic_set(&devr->x1->usecnt, 0); | ||||
mutex_init(&devr->x1->tgt_qp_mutex); | mutex_init(&devr->x1->tgt_qp_mutex); | ||||
INIT_LIST_HEAD(&devr->x1->tgt_qp_list); | INIT_LIST_HEAD(&devr->x1->tgt_qp_list); | ||||
memset(&attr, 0, sizeof(attr)); | memset(&attr, 0, sizeof(attr)); | ||||
attr.attr.max_sge = 1; | attr.attr.max_sge = 1; | ||||
attr.attr.max_wr = 1; | attr.attr.max_wr = 1; | ||||
attr.srq_type = IB_SRQT_XRC; | attr.srq_type = IB_SRQT_XRC; | ||||
attr.ext.xrc.cq = devr->c0; | attr.ext.cq = devr->c0; | ||||
attr.ext.xrc.xrcd = devr->x0; | attr.ext.xrc.xrcd = devr->x0; | ||||
devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL); | devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq); | ||||
if (IS_ERR(devr->s0)) { | if (!devr->s0) { | ||||
ret = PTR_ERR(devr->s0); | ret = -ENOMEM; | ||||
goto error4; | goto error4; | ||||
} | } | ||||
devr->s0->device = &dev->ib_dev; | devr->s0->device = &dev->ib_dev; | ||||
devr->s0->pd = devr->p0; | devr->s0->pd = devr->p0; | ||||
devr->s0->uobject = NULL; | |||||
devr->s0->event_handler = NULL; | |||||
devr->s0->srq_context = NULL; | |||||
devr->s0->srq_type = IB_SRQT_XRC; | devr->s0->srq_type = IB_SRQT_XRC; | ||||
devr->s0->ext.xrc.xrcd = devr->x0; | devr->s0->ext.xrc.xrcd = devr->x0; | ||||
devr->s0->ext.xrc.cq = devr->c0; | devr->s0->ext.cq = devr->c0; | ||||
ret = mlx5_ib_create_srq(devr->s0, &attr, NULL); | |||||
if (ret) | |||||
goto err_create; | |||||
atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); | atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); | ||||
atomic_inc(&devr->s0->ext.xrc.cq->usecnt); | atomic_inc(&devr->s0->ext.cq->usecnt); | ||||
atomic_inc(&devr->p0->usecnt); | atomic_inc(&devr->p0->usecnt); | ||||
atomic_set(&devr->s0->usecnt, 0); | atomic_set(&devr->s0->usecnt, 0); | ||||
memset(&attr, 0, sizeof(attr)); | memset(&attr, 0, sizeof(attr)); | ||||
attr.attr.max_sge = 1; | attr.attr.max_sge = 1; | ||||
attr.attr.max_wr = 1; | attr.attr.max_wr = 1; | ||||
attr.srq_type = IB_SRQT_BASIC; | attr.srq_type = IB_SRQT_BASIC; | ||||
devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL); | devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq); | ||||
if (IS_ERR(devr->s1)) { | if (!devr->s1) { | ||||
ret = PTR_ERR(devr->s1); | ret = -ENOMEM; | ||||
goto error5; | goto error5; | ||||
} | } | ||||
devr->s1->device = &dev->ib_dev; | devr->s1->device = &dev->ib_dev; | ||||
devr->s1->pd = devr->p0; | devr->s1->pd = devr->p0; | ||||
devr->s1->uobject = NULL; | |||||
devr->s1->event_handler = NULL; | |||||
devr->s1->srq_context = NULL; | |||||
devr->s1->srq_type = IB_SRQT_BASIC; | devr->s1->srq_type = IB_SRQT_BASIC; | ||||
devr->s1->ext.xrc.cq = devr->c0; | devr->s1->ext.cq = devr->c0; | ||||
ret = mlx5_ib_create_srq(devr->s1, &attr, NULL); | |||||
if (ret) | |||||
goto error6; | |||||
atomic_inc(&devr->p0->usecnt); | atomic_inc(&devr->p0->usecnt); | ||||
atomic_set(&devr->s0->usecnt, 0); | atomic_set(&devr->s1->usecnt, 0); | ||||
for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { | for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { | ||||
INIT_WORK(&devr->ports[port].pkey_change_work, | INIT_WORK(&devr->ports[port].pkey_change_work, | ||||
pkey_change_handler); | pkey_change_handler); | ||||
devr->ports[port].devr = devr; | devr->ports[port].devr = devr; | ||||
} | } | ||||
return 0; | return 0; | ||||
error6: | |||||
kfree(devr->s1); | |||||
error5: | error5: | ||||
mlx5_ib_destroy_srq(devr->s0); | mlx5_ib_destroy_srq(devr->s0, NULL); | ||||
err_create: | |||||
kfree(devr->s0); | |||||
error4: | error4: | ||||
mlx5_ib_dealloc_xrcd(devr->x1); | mlx5_ib_dealloc_xrcd(devr->x1, NULL); | ||||
error3: | error3: | ||||
mlx5_ib_dealloc_xrcd(devr->x0); | mlx5_ib_dealloc_xrcd(devr->x0, NULL); | ||||
error2: | error2: | ||||
mlx5_ib_destroy_cq(devr->c0); | mlx5_ib_destroy_cq(devr->c0, NULL); | ||||
err_create_cq: | |||||
kfree(devr->c0); | |||||
error1: | error1: | ||||
mlx5_ib_dealloc_pd(devr->p0); | mlx5_ib_dealloc_pd(devr->p0, NULL); | ||||
error0: | error0: | ||||
kfree(devr->p0); | |||||
return ret; | return ret; | ||||
} | } | ||||
static void destroy_dev_resources(struct mlx5_ib_resources *devr) | static void destroy_dev_resources(struct mlx5_ib_resources *devr) | ||||
{ | { | ||||
struct mlx5_ib_dev *dev = | |||||
container_of(devr, struct mlx5_ib_dev, devr); | |||||
int port; | int port; | ||||
mlx5_ib_destroy_srq(devr->s1); | mlx5_ib_destroy_srq(devr->s1, NULL); | ||||
mlx5_ib_destroy_srq(devr->s0); | kfree(devr->s1); | ||||
mlx5_ib_dealloc_xrcd(devr->x0); | mlx5_ib_destroy_srq(devr->s0, NULL); | ||||
mlx5_ib_dealloc_xrcd(devr->x1); | kfree(devr->s0); | ||||
mlx5_ib_destroy_cq(devr->c0); | mlx5_ib_dealloc_xrcd(devr->x0, NULL); | ||||
mlx5_ib_dealloc_pd(devr->p0); | mlx5_ib_dealloc_xrcd(devr->x1, NULL); | ||||
mlx5_ib_destroy_cq(devr->c0, NULL); | |||||
kfree(devr->c0); | |||||
mlx5_ib_dealloc_pd(devr->p0, NULL); | |||||
kfree(devr->p0); | |||||
/* Make sure no change P_Key work items are still executing */ | /* Make sure no change P_Key work items are still executing */ | ||||
for (port = 0; port < dev->num_ports; ++port) | for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) | ||||
cancel_work_sync(&devr->ports[port].pkey_change_work); | cancel_work_sync(&devr->ports[port].pkey_change_work); | ||||
} | } | ||||
static u32 get_core_cap_flags(struct ib_device *ibdev) | static u32 get_core_cap_flags(struct ib_device *ibdev) | ||||
{ | { | ||||
struct mlx5_ib_dev *dev = to_mdev(ibdev); | struct mlx5_ib_dev *dev = to_mdev(ibdev); | ||||
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); | enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); | ||||
u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); | u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); | ||||
▲ Show 20 Lines • Show All 295 Lines • ▼ Show 20 Lines | static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | ||||
if (err) | if (err) | ||||
goto err_free_port; | goto err_free_port; | ||||
if (mlx5_use_mad_ifc(dev)) | if (mlx5_use_mad_ifc(dev)) | ||||
get_ext_port_caps(dev); | get_ext_port_caps(dev); | ||||
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); | MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); | ||||
INIT_IB_DEVICE_OPS(&dev->ib_dev.ops, mlx5, MLX5); | |||||
snprintf(dev->ib_dev.name, IB_DEVICE_NAME_MAX, "mlx5_%d", device_get_unit(mdev->pdev->dev.bsddev)); | snprintf(dev->ib_dev.name, IB_DEVICE_NAME_MAX, "mlx5_%d", device_get_unit(mdev->pdev->dev.bsddev)); | ||||
dev->ib_dev.owner = THIS_MODULE; | dev->ib_dev.owner = THIS_MODULE; | ||||
dev->ib_dev.node_type = RDMA_NODE_IB_CA; | dev->ib_dev.node_type = RDMA_NODE_IB_CA; | ||||
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; | dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; | ||||
dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); | dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); | ||||
dev->ib_dev.phys_port_cnt = dev->num_ports; | dev->ib_dev.phys_port_cnt = dev->num_ports; | ||||
dev->ib_dev.num_comp_vectors = | dev->ib_dev.num_comp_vectors = | ||||
dev->mdev->priv.eq_table.num_comp_vectors; | dev->mdev->priv.eq_table.num_comp_vectors; | ||||
▲ Show 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | ||||
dev->ib_dev.add_gid = mlx5_ib_add_gid; | dev->ib_dev.add_gid = mlx5_ib_add_gid; | ||||
dev->ib_dev.del_gid = mlx5_ib_del_gid; | dev->ib_dev.del_gid = mlx5_ib_del_gid; | ||||
dev->ib_dev.query_pkey = mlx5_ib_query_pkey; | dev->ib_dev.query_pkey = mlx5_ib_query_pkey; | ||||
dev->ib_dev.modify_device = mlx5_ib_modify_device; | dev->ib_dev.modify_device = mlx5_ib_modify_device; | ||||
dev->ib_dev.modify_port = mlx5_ib_modify_port; | dev->ib_dev.modify_port = mlx5_ib_modify_port; | ||||
dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; | dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; | ||||
dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; | dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; | ||||
dev->ib_dev.mmap = mlx5_ib_mmap; | dev->ib_dev.mmap = mlx5_ib_mmap; | ||||
dev->ib_dev.mmap_free = mlx5_ib_mmap_free; | |||||
dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; | dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; | ||||
dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; | dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; | ||||
dev->ib_dev.create_ah = mlx5_ib_create_ah; | dev->ib_dev.create_ah = mlx5_ib_create_ah; | ||||
dev->ib_dev.query_ah = mlx5_ib_query_ah; | dev->ib_dev.query_ah = mlx5_ib_query_ah; | ||||
dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; | dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; | ||||
dev->ib_dev.create_srq = mlx5_ib_create_srq; | dev->ib_dev.create_srq = mlx5_ib_create_srq; | ||||
dev->ib_dev.modify_srq = mlx5_ib_modify_srq; | dev->ib_dev.modify_srq = mlx5_ib_modify_srq; | ||||
dev->ib_dev.query_srq = mlx5_ib_query_srq; | dev->ib_dev.query_srq = mlx5_ib_query_srq; | ||||
▲ Show 20 Lines • Show All 225 Lines • Show Last 20 Lines |