Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/iwarp/iw_ixl.c
- This file was added.
/******************************************************************************* | |||||
* | |||||
* Copyright (c) 2015-2017 Intel Corporation. All rights reserved. | |||||
* | |||||
* This software is available to you under a choice of one of two | |||||
* licenses. You may choose to be licensed under the terms of the GNU | |||||
* General Public License (GPL) Version 2, available from the file | |||||
* COPYING in the main directory of this source tree, or the | |||||
* OpenFabrics.org BSD license below: | |||||
* | |||||
* Redistribution and use in source and binary forms, with or | |||||
* without modification, are permitted provided that the following | |||||
* conditions are met: | |||||
* | |||||
* - Redistributions of source code must retain the above | |||||
* copyright notice, this list of conditions and the following | |||||
* disclaimer. | |||||
* | |||||
* - Redistributions in binary form must reproduce the above | |||||
* copyright notice, this list of conditions and the following | |||||
* disclaimer in the documentation and/or other materials | |||||
* provided with the distribution. | |||||
* | |||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||||
* SOFTWARE. | |||||
* | |||||
*******************************************************************************/ | |||||
/*$FreeBSD$*/ | |||||
#include <sys/param.h> | |||||
#include <sys/module.h> | |||||
#include <sys/kernel.h> | |||||
#include <sys/systm.h> | |||||
#include <sys/bus.h> | |||||
#include <sys/rman.h> | |||||
#include <sys/socket.h> | |||||
#include <net/if.h> | |||||
#include <net/if_var.h> | |||||
#include <net/if_dl.h> | |||||
#include <machine/resource.h> | |||||
#include "iw_ixl.h" | |||||
#include "i40iw_register.h" | |||||
/**
 * Driver version string, exported to identify the iw_ixl module.
 */
char ixliw_driver_version[] = "0.2.0";
/*
 * TUNEABLE PARAMETERS:
 */
static
SYSCTL_NODE(_hw, OID_AUTO, iw_ixl, CTLFLAG_RD, 0,
	"IW_IXL driver parameters");

/* Maximum number of CEQs to use; presumably 0 means "no limit" -- TODO confirm against setup code. */
static int ixliw_max_ceq = 0;
TUNABLE_INT("hw.iw_ixl.max_ceq", &ixliw_max_ceq);
SYSCTL_INT(_hw_iw_ixl, OID_AUTO, max_ceq, CTLFLAG_RDTUN,
	&ixliw_max_ceq, 0, "iWARP max CEQ");

/* Debug verbosity/mask; runtime-writable (CTLFLAG_RWTUN). */
static int ixliw_debug = 0;
TUNABLE_INT("hw.iw_ixl.debug", &ixliw_debug);
SYSCTL_INT(_hw_iw_ixl, OID_AUTO, debug, CTLFLAG_RWTUN,
	&ixliw_debug, 0, "iWARP debug");

/* MPA protocol version used for connection setup (default 2). */
static int ixliw_mpa_version = 2;
TUNABLE_INT("hw.iw_ixl.mpa_version", &ixliw_mpa_version);
SYSCTL_INT(_hw_iw_ixl, OID_AUTO, mpa_version, CTLFLAG_RDTUN,
	&ixliw_mpa_version, 0, "iWARP mpa version");

/* Push-mode setting; not exposed via sysctl in this file. */
static int ixliw_push_mode = 0;

/* Global list of per-PF handlers and the spinlock protecting it. */
static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;
/**
 * i40iw_find_handler - find the handler matching a given client info
 * @ldev: pointer to the client info to match on
 *
 * Walks the global handler list under i40iw_handler_lock and returns
 * the handler whose ldev.dev equals @ldev->dev, or NULL if none matches.
 */
static struct i40iw_handler *
i40iw_find_handler(struct ixl_iw_pf *ldev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.dev == ldev->dev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}
/** | |||||
* i40iw_add_handler - add a handler to the list | |||||
* @hdl: handler to be added to the handler list | |||||
*/ | |||||
static void | |||||
i40iw_add_handler(struct i40iw_handler *hdl) | |||||
{ | |||||
unsigned long flags; | |||||
spin_lock_irqsave(&i40iw_handler_lock, flags); | |||||
list_add(&hdl->list, &i40iw_handlers); | |||||
spin_unlock_irqrestore(&i40iw_handler_lock, flags); | |||||
} | |||||
/** | |||||
* i40iw_del_handler - delete a handler from the list | |||||
* @hdl: handler to be deleted from the handler list | |||||
*/ | |||||
static int | |||||
i40iw_del_handler(struct i40iw_handler *hdl) | |||||
{ | |||||
unsigned long flags; | |||||
spin_lock_irqsave(&i40iw_handler_lock, flags); | |||||
list_del(&hdl->list); | |||||
spin_unlock_irqrestore(&i40iw_handler_lock, flags); | |||||
return 0; | |||||
} | |||||
/** | |||||
* i40iw_enable_intr - set up device interrupts | |||||
* @dev: hardware control device structure | |||||
* @msix_id: id of the interrupt to be enabled | |||||
*/ | |||||
static void | |||||
i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id) | |||||
{ | |||||
u32 val; | |||||
val = I40E_PFINT_DYN_CTLN_INTENA_MASK | | |||||
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | | |||||
(3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); | |||||
if (dev->is_pf) | |||||
wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val); | |||||
else | |||||
wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val); | |||||
} | |||||
/**
 * i40iw_dpc - taskqueue handler for the aeq and (when shared) ceq 0
 * @context: iwarp device
 * @pending: taskqueue pending count (unused)
 */
static void
i40iw_dpc(void *context, int pending)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)context;

	/* With a shared MSI-X vector, CEQ 0 completions arrive here too. */
	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	/* Re-enable the aeq/ceq0 interrupt vector. */
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}
/**
 * i40iw_ceq_dpc - taskqueue handler for a dedicated CEQ
 * @context: pointer to the ceq
 * @pending: taskqueue pending count (unused)
 */
static void
i40iw_ceq_dpc(void *context, int pending)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)context;
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	/* Re-enable this ceq's interrupt vector. */
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}
/** | |||||
* i40iw_irq_handler - interrupt handler for aeq and ceq0 | |||||
* @irq: Interrupt request number | |||||
* @data: iwarp device | |||||
*/ | |||||
static void | |||||
i40iw_irq_handler(void *data) | |||||
{ | |||||
struct i40iw_device *iwdev = (struct i40iw_device *)data; | |||||
taskqueue_enqueue(iwdev->irq_tq, &iwdev->irq_task); | |||||
return; | |||||
} | |||||
/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: TRUE if the hardware CQP should be destroyed as well
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void
i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	INIT_DBG_DEV(iwdev->ldev->dev, "begin");
	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);
	/* Flush any CQP requests still outstanding before freeing arrays. */
	i40iw_cleanup_pending_cqp_op(iwdev);
	ixliw_free_dma_mem(&cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}
/** | |||||
* i40iw_disable_irqs - disable device interrupts | |||||
* @dev: hardware control device structure | |||||
* @msic_vec: msix vector to disable irq | |||||
* @dev_id: parameter to pass to free_irq (used during irq setup) | |||||
* | |||||
* The function is called when destroying aeq/ceq | |||||
*/ | |||||
static void | |||||
i40iw_disable_irq(struct i40iw_device *iwdev, | |||||
struct i40iw_msix_vector *msix_vec) | |||||
{ | |||||
struct i40iw_sc_dev *dev = &iwdev->sc_dev; | |||||
int rid = msix_vec->idx + 1; | |||||
wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0); | |||||
if (msix_vec->tag != NULL) { | |||||
bus_teardown_intr(iwdev->ldev->dev, msix_vec->res, | |||||
msix_vec->tag); | |||||
msix_vec->tag = NULL; | |||||
} | |||||
if (msix_vec->res != NULL) { | |||||
bus_release_resource(iwdev->ldev->dev, SYS_RES_IRQ, rid, | |||||
msix_vec->res); | |||||
msix_vec->res = NULL; | |||||
} | |||||
} | |||||
/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void
i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_aeq *aeq = &iwdev->aeq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;

	i40iw_debug(dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	/*
	 * When the AEQ has its own vector, tear down its interrupt and
	 * taskqueue here; with a shared vector i40iw_del_ceqs() does it.
	 */
	if (!iwdev->msix_shared) {
		i40iw_disable_irq(iwdev, iwdev->iw_msixtbl);
		taskqueue_drain(iwdev->irq_tq, &iwdev->irq_task);
		taskqueue_free(iwdev->irq_tq);
	}
	/* Only poll for completion if the destroy request itself succeeded. */
	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_debug(dev, I40IW_DEBUG_AEQ, "destroy aeq failed %d\n", status);
	ixliw_free_dma_mem(&aeq->mem);
}
/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void
i40iw_destroy_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *iwceq)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	INIT_DBG_DEV(iwdev->ldev->dev, "begin");
	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_debug(dev, I40IW_DEBUG_DEV,
		    "ceq destroy command failed %d\n", status);
		goto exit;
	}
	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_debug(dev, I40IW_DEBUG_DEV,
		    "ceq destroy completion failed %d\n", status);
exit:
	/* DMA memory is freed regardless of the destroy outcome. */
	ixliw_free_dma_mem(&iwceq->mem);
}
/**
 * i40iw_del_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void
i40iw_del_ceqs(struct i40iw_device *iwdev)
{
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
	u32 i = 0;

	if (iwdev->msix_shared) {
		/*
		 * CEQ 0 shares its vector with the AEQ and uses the
		 * device-level taskqueue rather than the per-CEQ one.
		 */
		i40iw_disable_irq(iwdev, msix_vec);
		taskqueue_drain(iwdev->irq_tq, &iwdev->irq_task);
		taskqueue_free(iwdev->irq_tq);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}
	/*
	 * NOTE(review): msix_vec is advanced past entry 0 even when
	 * msix_shared is false while iwceq stays at index 0 — confirm this
	 * matches the vector layout set up in i40iw_setup_ceqs(), which
	 * pairs ceq_id with iw_msixtbl[ceq_id].
	 */
	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(iwdev, msix_vec);
		taskqueue_drain(iwceq->irq_tq, &iwceq->irq_task);
		taskqueue_free(iwceq->irq_tq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}
	iwdev->sc_dev.ceq_valid = FALSE;
}
/** | |||||
* i40iw_destroy_ccq - destroy control cq | |||||
* @iwdev: iwarp device | |||||
* | |||||
* Issue destroy ccq request and | |||||
* free the resources associated with the ccq | |||||
*/ | |||||
static void | |||||
i40iw_destroy_ccq(struct i40iw_device *iwdev) | |||||
{ | |||||
struct i40iw_ccq *ccq = &iwdev->ccq; | |||||
struct i40iw_sc_dev *dev = &iwdev->sc_dev; | |||||
enum i40iw_status_code status = 0; | |||||
status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, TRUE); | |||||
if (status) | |||||
i40iw_debug(dev, I40IW_DEBUG_DEV, | |||||
"ccq destroy failed %d\n", status); | |||||
ixliw_free_dma_mem(&ccq->mem_cq); | |||||
} | |||||
/* types of hmc objects */
/*
 * Iterated by i40iw_create_hmc_objs() and i40iw_del_hmc_objects();
 * those loops assume IW_HMC_OBJ_TYPE_NUM equals the number of entries
 * here -- TODO confirm the macro tracks this table.
 */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};
/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: hmc info describing the objects to delete
 */
static void
i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
    enum i40iw_hmc_rsrc_type obj_type, struct i40iw_hmc_info *hmc_info)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	/* Delete every object of this type that was provisioned. */
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = TRUE;
	if (dev->hmc_ops->del_hmc_object(dev, &info, FALSE))
		DPRINTF("iw_ixl: del obj of type %d failed\n", obj_type);
}
/** | |||||
* i40iw_del_hmc_objects - remove all device hmc objects | |||||
* @dev: iwarp device | |||||
* @hmc_info: hmc_info to free | |||||
*/ | |||||
static void | |||||
i40iw_del_hmc_objects(struct i40iw_sc_dev *dev, | |||||
struct i40iw_hmc_info *hmc_info) | |||||
{ | |||||
unsigned int i; | |||||
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) | |||||
i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info); | |||||
} | |||||
/** | |||||
* i40iw_ceq_handler - interrupt handler for ceq | |||||
* @data: ceq pointer | |||||
*/ | |||||
static void | |||||
i40iw_ceq_handler(void *data) | |||||
{ | |||||
struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data; | |||||
taskqueue_enqueue(iwceq->irq_tq, &iwceq->irq_task); | |||||
return; | |||||
} | |||||
/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 *
 * Thin wrapper over the device's create_hmc_object operation.
 */
static enum i40iw_status_code
i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
    struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}
/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: TRUE if the function is PF otherwise FALSE
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code
i40iw_create_hmc_objs(struct i40iw_device *iwdev, bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	i40iw_debug(dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	/* Create objects in the fixed order given by iw_hmc_obj_types[]. */
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		info.add_sd_cnt = 0;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_debug(dev, I40IW_DEBUG_HMC, "create obj type %d status = %d\n",
			    iw_hmc_obj_types[i], status);
			break;
		}
	}
	/* All objects created: wait for the static HMC pages. */
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
		    dev->hmc_fn_id,
		    TRUE, true));
	/* Roll back, in reverse order, the types created before the failure. */
	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
		    iw_hmc_obj_types[i],
		    dev->hmc_info);
	}
	return status;
}
/** | |||||
* i40iw_obj_aligned_mem - get aligned memory from device allocated memory | |||||
* @iwdev: iwarp device | |||||
* @memptr: points to the memory addresses | |||||
* @size: size of memory needed | |||||
* @mask: mask for the aligned memory | |||||
* | |||||
* Get aligned memory of the requested size and | |||||
* update the memptr to point to the new aligned memory | |||||
* Return 0 if successful, otherwise return no memory error | |||||
*/ | |||||
enum i40iw_status_code | |||||
i40iw_obj_aligned_mem(struct i40iw_device *iwdev, struct i40iw_dma_mem *memptr, | |||||
u32 size, u32 mask) | |||||
{ | |||||
uintptr_t va, newva; | |||||
uintptr_t extra; | |||||
va = (uintptr_t)iwdev->obj_next.va; | |||||
newva = va; | |||||
if (mask) | |||||
newva = ALIGN(va, (uintptr_t)(mask + 1)); | |||||
extra = newva - va; | |||||
memptr->va = (u8 *)va + extra; | |||||
memptr->pa = iwdev->obj_next.pa + extra; | |||||
memptr->size = size; | |||||
if (((u8 *)memptr->va + size) > ((u8 *)iwdev->obj_mem.va + iwdev->obj_mem.size)) | |||||
return I40IW_ERR_NO_MEMORY; | |||||
iwdev->obj_next.va = (u8 *)memptr->va + size; | |||||
iwdev->obj_next.pa = memptr->pa + size; | |||||
return 0; | |||||
} | |||||
/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code
i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_ccq *ccq = &iwdev->ccq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ccq_init_info info;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;

	i40iw_debug(dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
	    ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	/* Shadow area comes from the preallocated device object pool. */
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
	    I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	memset(&info, 0, sizeof(info));
	info.dev = dev;
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = FALSE;
	info.ceq_id_valid = TRUE;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, TRUE, true);
exit:
	/* On any failure the CQ DMA memory is released (free of NULL is a no-op). */
	if (status)
		ixliw_free_dma_mem(&ccq->mem_cq);
	return status;
}
/** | |||||
* i40iw_create_cqp - create control qp | |||||
* @iwdev: iwarp device | |||||
* | |||||
* Return 0, if the cqp and all the resources associated with it | |||||
* are successfully created, otherwise return error | |||||
*/ | |||||
static enum i40iw_status_code | |||||
i40iw_create_cqp(struct i40iw_device *iwdev) | |||||
{ | |||||
struct i40iw_dma_mem mem; | |||||
struct i40iw_sc_dev *dev = &iwdev->sc_dev; | |||||
struct i40iw_cqp_init_info cqp_init_info; | |||||
struct i40iw_cqp *cqp = &iwdev->cqp; | |||||
int i; | |||||
u32 sqsize = I40IW_CQP_SW_SQSIZE_2048; | |||||
u16 maj_err, min_err; | |||||
enum i40iw_status_code status; | |||||
i40iw_debug(dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); | |||||
if (!cqp->cqp_requests) | |||||
return I40IW_ERR_NO_MEMORY; | |||||
cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL); | |||||
if (!cqp->scratch_array) { | |||||
kfree(cqp->cqp_requests); | |||||
return I40IW_ERR_NO_MEMORY; | |||||
} | |||||
dev->cqp = &cqp->sc_cqp; | |||||
dev->cqp->dev = dev; | |||||
status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq, | |||||
(sizeof(struct i40iw_cqp_sq_wqe) * sqsize), I40IW_CQP_ALIGNMENT); | |||||
if (status) | |||||
goto exit; | |||||
status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx), | |||||
I40IW_HOST_CTX_ALIGNMENT_MASK); | |||||
if (status) | |||||
goto exit; | |||||
dev->cqp->host_ctx_pa = mem.pa; | |||||
dev->cqp->host_ctx = mem.va; | |||||
/* populate the cqp init info */ | |||||
memset(&cqp_init_info, 0, sizeof(cqp_init_info)); | |||||
cqp_init_info.dev = dev; | |||||
cqp_init_info.sq_size = sqsize; | |||||
cqp_init_info.sq = cqp->sq.va; | |||||
cqp_init_info.sq_pa = cqp->sq.pa; | |||||
cqp_init_info.host_ctx_pa = mem.pa; | |||||
cqp_init_info.host_ctx = mem.va; | |||||
cqp_init_info.hmc_profile = iwdev->resource_profile; | |||||
cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs; | |||||
cqp_init_info.scratch_array = cqp->scratch_array; | |||||
status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info); | |||||
if (status) { | |||||
i40iw_debug(dev, I40IW_DEBUG_CQP, "cqp init status %d\n", status); | |||||
goto exit; | |||||
} | |||||
status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err); | |||||
if (status) { | |||||
i40iw_debug(dev, I40IW_DEBUG_ERR | I40IW_DEBUG_CQP, | |||||
"cqp create status %d maj_err %d min_err %d\n", status, | |||||
maj_err, min_err); | |||||
goto exit; | |||||
} | |||||
spin_lock_init(&cqp->req_lock); | |||||
INIT_LIST_HEAD(&cqp->cqp_avail_reqs); | |||||
INIT_LIST_HEAD(&cqp->cqp_pending_reqs); | |||||
/* init the waitq of the cqp_requests and add them to the list */ | |||||
for (i = 0; i < sqsize; i++) { | |||||
init_waitqueue_head(&cqp->cqp_requests[i].waitq); | |||||
list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs); | |||||
} | |||||
return 0; | |||||
exit: | |||||
/* clean up the created resources */ | |||||
i40iw_destroy_cqp(iwdev, FALSE); | |||||
return status; | |||||
} | |||||
/**
 * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
 * @iwdev: iwarp device
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code
i40iw_configure_ceq_vector(struct i40iw_device *iwdev, struct i40iw_ceq *iwceq,
    u32 ceq_id, struct i40iw_msix_vector *msix_vec)
{
	device_t dev = iwdev->ldev->dev;
	int err;
	int rid = msix_vec->idx + 1;
	enum i40iw_status_code status;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	if (ceq_id == 0) {
		/*
		 * CEQ 0's vector is shared with the AEQ: the interrupt and
		 * taskqueue live in the device structure and dispatch
		 * i40iw_dpc(), which services both queues.
		 */
		msix_vec->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (!msix_vec->res) {
			i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_ERR | I40IW_DEBUG_DEV | I40IW_DEBUG_AEQ,
			    "Unable to allocate bus resource: aeq-ceq interrupt [%d]\n",
			    rid);
			return I40IW_ERR_CONFIG;
		}
		err = bus_setup_intr(dev, msix_vec->res, INTR_TYPE_NET | INTR_MPSAFE,
		    NULL, i40iw_irq_handler, iwdev, &msix_vec->tag);
		if (err) {
			i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_ERR | I40IW_DEBUG_DEV | I40IW_DEBUG_AEQ,
			    "Failed to register aeq-ceq handler\n");
			status = I40IW_ERR_CONFIG;
			goto fail_intr;
		}
		bus_describe_intr(dev, msix_vec->res, msix_vec->tag,
		    "aeq-ceq");
		TASK_INIT(&iwdev->irq_task, 0, i40iw_dpc, iwdev);
		iwdev->irq_tq = taskqueue_create_fast("iw_ixl_aeq-ceq",
		    M_NOWAIT, taskqueue_thread_enqueue, &iwdev->irq_tq);
		if (iwdev->irq_tq == NULL) {
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ERR | I40IW_DEBUG_DEV | I40IW_DEBUG_AEQ,
			    "Failed to create aeq-ceq taskqueue\n");
			status = I40IW_ERR_NO_MEMORY;
			goto fail_tq;
		}
		taskqueue_start_threads(&iwdev->irq_tq, 1, PI_NET,
		    "%s iwarp aeq-ceq", device_get_nameunit(dev));
	} else {
		/*
		 * All other CEQs get a dedicated vector, per-CEQ taskqueue
		 * and the i40iw_ceq_dpc() handler.
		 */
		msix_vec->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (!msix_vec->res) {
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ERR | I40IW_DEBUG_DEV,
			    "Unable to allocate bus resource: ceq interrupt [%d]\n",
			    rid);
			return I40IW_ERR_CONFIG;
		}
		err = bus_setup_intr(dev, msix_vec->res, INTR_TYPE_NET | INTR_MPSAFE,
		    NULL, i40iw_ceq_handler, iwceq, &msix_vec->tag);
		if (err) {
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ERR | I40IW_DEBUG_DEV,
			    "Failed to register ceq handler\n");
			status = I40IW_ERR_CONFIG;
			goto fail_intr;
		}
		bus_describe_intr(dev, msix_vec->res, msix_vec->tag, "ceq");
		TASK_INIT(&iwceq->irq_task, 0, i40iw_ceq_dpc, iwceq);
		iwceq->irq_tq = taskqueue_create_fast("iw_ixl_ceq", M_NOWAIT,
		    taskqueue_thread_enqueue, &iwceq->irq_tq);
		if (iwceq->irq_tq == NULL) {
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ERR | I40IW_DEBUG_DEV,
			    "Failed to create ceq taskqueue\n");
			status = I40IW_ERR_NO_MEMORY;
			goto fail_tq;
		}
		taskqueue_start_threads(&iwceq->irq_tq, 1, PI_NET,
		    "%s iwarp ceq-%d", device_get_nameunit(dev), ceq_id);
	}
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT,
	    "%s ceq_id=%d, msix_vec->idx=%d, cpu_id=%d\n", __func__, ceq_id,
	    msix_vec->idx, msix_vec->cpu_affinity);
	/* Pin the vector to its configured CPU. */
	bus_bind_intr(dev, msix_vec->res, msix_vec->cpu_affinity);
	return 0;

	/* Unwind in reverse order of setup. */
fail_tq:
	bus_teardown_intr(dev, msix_vec->res, msix_vec->tag);
	msix_vec->tag = NULL;
fail_intr:
	bus_release_resource(dev, SYS_RES_IRQ, rid, msix_vec->res);
	msix_vec->res = NULL;
	return (status);
}
/** | |||||
* i40iw_create_ceq - create completion event queue | |||||
* @iwdev: iwarp device | |||||
* @iwceq: pointer to the ceq resources to be created | |||||
* @ceq_id: the id number of the iwceq | |||||
* | |||||
* Return 0, if the ceq and the resources associated with it | |||||
* are successfully created, otherwise return error | |||||
*/ | |||||
static enum i40iw_status_code | |||||
i40iw_create_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *iwceq, | |||||
u32 ceq_id) | |||||
{ | |||||
struct i40iw_sc_dev *dev = &iwdev->sc_dev; | |||||
struct i40iw_ceq_init_info info; | |||||
u64 scratch; | |||||
enum i40iw_status_code status; | |||||
i40iw_debug(dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
memset(&info, 0, sizeof(info)); | |||||
info.ceq_id = ceq_id; | |||||
iwceq->iwdev = iwdev; | |||||
iwceq->mem.size = sizeof(struct i40iw_ceqe) * | |||||
iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; | |||||
status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size, | |||||
I40IW_CEQ_ALIGNMENT); | |||||
if (status) | |||||
goto exit; | |||||
info.ceq_id = ceq_id; | |||||
info.ceqe_base = iwceq->mem.va; | |||||
info.ceqe_pa = iwceq->mem.pa; | |||||
info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; | |||||
iwceq->sc_ceq.ceq_id = ceq_id; | |||||
info.dev = dev; | |||||
scratch = (uintptr_t)&iwdev->cqp.sc_cqp; | |||||
status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info); | |||||
if (!status) | |||||
status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch); | |||||
exit: | |||||
if (status) | |||||
ixliw_free_dma_mem(&iwceq->mem); | |||||
return status; | |||||
} | |||||
/**
 * i40iw_request_reset - ask the ixl PF driver to reset the iwarp function
 * @iwdev: iwarp device
 */
void
i40iw_request_reset(struct i40iw_device *iwdev)
{
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ERR | I40IW_DEBUG_INIT |
	    I40IW_DEBUG_CQP, "%s: begin\n", __func__);
	ixl_iw_pf_reset(iwdev->hdl->ldev.handle);
}
/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code
i40iw_setup_ceqs(struct i40iw_device *iwdev)
{
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	int err;
	u32 ceq_id;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	err = ixl_iw_pf_msix_init(iwdev->hdl->ldev.handle,
	    &iwdev->msix_mapping);
	if (err) {
		status = I40IW_ERR_CONFIG;
		goto exit;
	}
	/* One CEQ per MSI-X vector granted by the PF driver. */
	iwdev->ceqlist = kcalloc(iwdev->msix_count, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
	for (ceq_id = 0; ceq_id < iwdev->msix_count; ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DEV, "create ceq status = %d\n", status);
			break;
		}
		msix_vec = &iwdev->iw_msixtbl[ceq_id];
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT | I40IW_DEBUG_DEV,
	    "%d MSIX interrupts used for this device\n", iwdev->ceqs_count);
exit:
	/* Partial success (at least one working CEQ) counts as success. */
	if (status && !iwdev->ceqs_count) {
		kfree(iwdev->ceqlist);
		iwdev->ceqlist = NULL;
		return status;
	} else {
		iwdev->sc_dev.ceq_valid = TRUE;
		return 0;
	}
}
/**
 * i40iw_create_aeq - create async event queue
 * @iwdev: iwarp device
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code
i40iw_create_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_aeq *aeq = &iwdev->aeq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq_init_info info;
	u64 scratch = 0;
	u32 aeq_size;
	enum i40iw_status_code status;

	i40iw_debug(dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	/* Size the AEQ for two entries per QP plus one per CQ. */
	aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
	    iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
	status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
	    I40IW_AEQ_ALIGNMENT);
	if (status)
		goto exit;
	memset(&info, 0, sizeof(info));
	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto exit;
	status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
	if (!status)
		status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
exit:
	if (status)
		ixliw_free_dma_mem(&aeq->mem);
	return status;
}
/** | |||||
* i40iw_setup_aeq - set up the device aeq | |||||
* @iwdev: iwarp device | |||||
* | |||||
* Create the aeq and configure its msix interrupt vector | |||||
* Return 0 if successful, otherwise return error | |||||
*/ | |||||
static enum i40iw_status_code | |||||
i40iw_setup_aeq(struct i40iw_device *iwdev) | |||||
{ | |||||
enum i40iw_status_code status; | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
status = i40iw_create_aeq(iwdev); | |||||
if (status) | |||||
return status; | |||||
return 0; | |||||
} | |||||
/**
 * i40iw_initialize_ilq - create iwarp local queue for cm
 * @iwdev: iwarp device
 *
 * Build the PUDA resource descriptor for the iWARP "local queue" used by
 * the connection manager and hand it to i40iw_puda_create_rsrc().
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code
i40iw_initialize_ilq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	memset(&info, 0, sizeof(info));
	/*
	 * Fixed ids and queue depths for the ILQ — presumably driver tuning
	 * constants; confirm against the PUDA resource documentation.
	 */
	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 0;
	info.count = 1;
	info.pd_id = 1;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 1024;
	info.tx_buf_cnt = 16384;
	/* CM receive handler and tx-buffer reclamation callback. */
	info.receive = i40iw_receive_ilq;
	info.xmit_complete = i40iw_free_sqbuf;
	/* mss field carries the current vsi MTU. */
	info.mss = iwdev->vsi.mtu;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ILQ, "ilq create fail\n");
	return status;
}
/**
 * i40iw_initialize_ieq - create iwarp exception queue
 * @iwdev: iwarp device
 *
 * Build the PUDA resource descriptor for the iWARP exception queue,
 * which is bound to the vsi's exception LAN queue.
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code
i40iw_initialize_ieq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	/* The IEQ's QP is the exception LAN queue assigned at vsi init. */
	info.qp_id = iwdev->vsi.exception_lan_queue;
	info.count = 1;
	info.pd_id = 2;
	info.sq_size = 8192;
	info.rq_size = 8192;
	/* Buffers must hold a full frame: MTU plus VLAN+Ethernet header. */
	info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
	info.tx_buf_cnt = 4096;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ERR | I40IW_DEBUG_IEQ,
		    "ieq create fail\n");
	return status;
}
/** | |||||
* i40iw_reinitialize_ieq - destroy and re-create ieq | |||||
* @dev: iwarp device | |||||
*/ | |||||
void | |||||
i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev) | |||||
{ | |||||
struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; | |||||
i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, FALSE); | |||||
if (i40iw_initialize_ieq(iwdev)) { | |||||
iwdev->reset = TRUE; | |||||
i40iw_request_reset(iwdev); | |||||
} | |||||
} | |||||
/** | |||||
* i40iw_hmc_setup - create hmc objects for the device | |||||
* @iwdev: iwarp device | |||||
* | |||||
* Set up the device private memory space for the number and size of | |||||
* the hmc objects and create the objects | |||||
* Return 0 if successful, otherwise return error | |||||
*/ | |||||
static enum i40iw_status_code | |||||
i40iw_hmc_setup(struct i40iw_device *iwdev) | |||||
{ | |||||
enum i40iw_status_code status; | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
iwdev->sd_type = I40IW_SD_TYPE_DIRECT; | |||||
status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT); | |||||
if (status) | |||||
goto exit; | |||||
status = i40iw_create_hmc_objs(iwdev, TRUE); | |||||
if (status) | |||||
goto exit; | |||||
iwdev->init_state = HMC_OBJS_CREATED; | |||||
exit: | |||||
return status; | |||||
} | |||||
/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 *
 * Free the object DMA region, the HMC SD table, the resource bitmap
 * arena, the ceq list, the msix table and the hmc info backing store.
 * Every pointer is NULLed after its free so a repeated teardown pass
 * cannot double free.
 */
static void
i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	ixliw_free_dma_mem(&iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}
/**
 * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
 * @iwdev: iwarp device
 * @idx: the index of the mac ip address to delete
 *
 * Posts an OP_DELETE_LOCAL_MAC_IPADDR_ENTRY command through the CQP.
 * Failures are only logged; the function itself returns nothing.
 */
static void
i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	/* TRUE: wait for the command's completion before returning. */
	cqp_request = i40iw_get_cqp_request(iwcqp, TRUE);
	if (!cqp_request) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ERR | I40IW_DEBUG_CQP, "cqp_request memory failed\n");
		return;
	}
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	/* scratch carries the request pointer back via the completion. */
	cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CQP, "CQP-OP Del MAC Ip entry fail");
}
/** | |||||
* i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table | |||||
* @iwdev: iwarp device | |||||
* @mac_addr: pointer to mac address | |||||
* @idx: the index of the mac ip address to add | |||||
*/ | |||||
static enum i40iw_status_code | |||||
i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev, u8 *mac_addr, u8 idx) | |||||
{ | |||||
struct i40iw_local_mac_ipaddr_entry_info *info; | |||||
struct i40iw_cqp *iwcqp = &iwdev->cqp; | |||||
struct i40iw_cqp_request *cqp_request; | |||||
struct cqp_commands_info *cqp_info; | |||||
enum i40iw_status_code status = 0; | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
cqp_request = i40iw_get_cqp_request(iwcqp, TRUE); | |||||
if (!cqp_request) { | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_ERR | I40IW_DEBUG_CQP, "cqp_request memory failed\n"); | |||||
return I40IW_ERR_NO_MEMORY; | |||||
} | |||||
cqp_info = &cqp_request->info; | |||||
cqp_info->post_sq = 1; | |||||
info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info; | |||||
ether_addr_copy(info->mac_addr, mac_addr); | |||||
info->entry_idx = idx; | |||||
cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; | |||||
cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY; | |||||
cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp; | |||||
cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; | |||||
status = i40iw_handle_cqp_op(iwdev, cqp_request); | |||||
if (status) | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CQP, "CQP-OP Add MAC Ip entry fail"); | |||||
return status; | |||||
} | |||||
/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code
i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
    u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	cqp_request = i40iw_get_cqp_request(iwcqp, TRUE);
	if (!cqp_request) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CQP, "cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}
	/* increment refcount, because we need the cqp request ret value */
	atomic_inc(&cqp_request->refcount);
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		/* HW returns the allocated table index in op_ret_val. */
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CQP, "CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}
/** | |||||
* i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry | |||||
* @iwdev: iwarp device | |||||
* @macaddr: pointer to mac address | |||||
* | |||||
* Allocate a mac ip address entry and add it to the hw table | |||||
* Return 0 if successful, otherwise return error | |||||
*/ | |||||
static enum i40iw_status_code | |||||
i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev, u8 *macaddr) | |||||
{ | |||||
enum i40iw_status_code status; | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx); | |||||
if (!status) { | |||||
status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr, | |||||
(u8)iwdev->mac_ip_table_idx); | |||||
if (status) | |||||
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); | |||||
} | |||||
return status; | |||||
} | |||||
/** | |||||
* i40iw_add_ipv6_addr - add ipv6 address to the hw arp table | |||||
* @iwdev: iwarp device | |||||
*/ | |||||
static void | |||||
i40iw_add_ipv6_addr(struct i40iw_device *iwdev) | |||||
{ | |||||
struct ifnet *ifp = iwdev->ifp; | |||||
struct ifaddr *ifa, *tmp; | |||||
struct sockaddr_in6 *sin6; | |||||
u32 local_ipaddr6[4]; | |||||
u8 *mac_addr; | |||||
char ip6buf[INET6_ADDRSTRLEN]; | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
if_addr_rlock(ifp); | |||||
TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) { | |||||
sin6 = (struct sockaddr_in6 *)ifa->ifa_addr; | |||||
if (sin6->sin6_family != AF_INET6) | |||||
continue; | |||||
i40iw_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr); | |||||
mac_addr = IF_LLADDR(ifp); | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, | |||||
"IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n", | |||||
ip6_sprintf(ip6buf, &sin6->sin6_addr), | |||||
mac_addr[0], mac_addr[1], mac_addr[2], | |||||
mac_addr[3], mac_addr[4], mac_addr[5]); | |||||
i40iw_manage_arp_cache(iwdev, mac_addr, local_ipaddr6, FALSE, | |||||
I40IW_ARP_ADD); | |||||
} | |||||
if_addr_runlock(ifp); | |||||
} | |||||
/** | |||||
* i40iw_add_ipv4_addr - add ipv4 address to the hw arp table | |||||
* @iwdev: iwarp device | |||||
*/ | |||||
static void | |||||
i40iw_add_ipv4_addr(struct i40iw_device *iwdev) | |||||
{ | |||||
struct ifnet *ifp = iwdev->ifp; | |||||
struct ifaddr *ifa; | |||||
struct sockaddr_in *sin; | |||||
in_addr_t ip_addr; | |||||
u8 *mac_addr; | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
if_addr_rlock(ifp); | |||||
TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { | |||||
sin = (struct sockaddr_in *)ifa->ifa_addr; | |||||
if (sin->sin_family != AF_INET) | |||||
continue; | |||||
ip_addr = ntohl(sin->sin_addr.s_addr); | |||||
mac_addr = IF_LLADDR(ifp); | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, | |||||
"IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n", | |||||
ip_addr >> 24, | |||||
(ip_addr >> 16) & 0xFF, | |||||
(ip_addr >> 8) & 0xFF, | |||||
ip_addr & 0xFF, | |||||
mac_addr[0], mac_addr[1], mac_addr[2], | |||||
mac_addr[3], mac_addr[4], mac_addr[5]); | |||||
i40iw_manage_arp_cache(iwdev, mac_addr, &ip_addr, TRUE, | |||||
I40IW_ARP_ADD); | |||||
} | |||||
if_addr_runlock(ifp); | |||||
} | |||||
/** | |||||
* i40iw_add_mac_ip - add mac and ip addresses | |||||
* @iwdev: iwarp device | |||||
* | |||||
* Create and add a mac ip address entry to the hw table and | |||||
* ipv4/ipv6 addresses to the arp cache | |||||
* Return 0 if successful, otherwise return error | |||||
*/ | |||||
static enum i40iw_status_code | |||||
i40iw_add_mac_ip(struct i40iw_device *iwdev) | |||||
{ | |||||
struct ifnet *ifp = iwdev->ifp; | |||||
enum i40iw_status_code status; | |||||
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__); | |||||
status = i40iw_alloc_set_mac_ipaddr(iwdev, IF_LLADDR(ifp)); | |||||
if (status) | |||||
return status; | |||||
i40iw_add_ipv4_addr(iwdev); | |||||
i40iw_add_ipv6_addr(iwdev); | |||||
return 0; | |||||
} | |||||
/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 *
 * Poll the three PE CPU status registers until all read 0x80 (assumed
 * to be the "ready" value — confirm against the HW datasheet), retrying
 * up to 14 times with a 1 s delay between reads, i.e. a worst-case wait
 * of roughly 14 seconds. Times out silently if never ready.
 */
static void
i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	INIT_DEBUGOUT("begin");
	do {
		statuscpu0 = rd32(hw, I40E_GLPE_CPUSTATUS0);
		HW_DEBUGOUT("CSR_CQP status[x%04X]", statuscpu0);
		statuscpu1 = rd32(hw, I40E_GLPE_CPUSTATUS1);
		HW_DEBUGOUT("I40E_GLPE_CPUSTATUS1 status[x%04X]",
		    statuscpu1);
		statuscpu2 = rd32(hw, I40E_GLPE_CPUSTATUS2);
		HW_DEBUGOUT("I40E_GLPE_CPUSTATUS2 status[x%04X]",
		    statuscpu2);
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) &&
		    (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		mdelay(1000);
		retrycount++;
	} while (retrycount < 14);
}
/**
 * i40iw_initialize_dev - initialize device
 * @iwdev: iwarp device
 * @pf_info: lan device information
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static enum i40iw_status_code
i40iw_initialize_dev(struct i40iw_device *iwdev, struct ixl_iw_pf *pf_info)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_l2params l2params;
	struct i40iw_dma_mem mem;
	struct i40iw_vsi_stats_info stats_info;
	struct i40iw_vsi_init_info vsi_info;
	u32 i;
	u32 size;
	u16 qset;
	enum i40iw_status_code status;

	i40iw_debug(dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	memset(&l2params, 0, sizeof(l2params));
	memset(&info, 0, sizeof(info));
	/* One arena for pble_rsrc, hmc_info and the hmc object info table. */
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
	    (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem) {
		i40iw_debug(dev, I40IW_DEBUG_ERR | I40IW_DEBUG_INIT,
		    "%s: memory alloc fail\n", __func__);
		return I40IW_ERR_NO_MEMORY;
	}
	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	/* Object info table immediately follows the pble_rsrc in the arena. */
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
	/* Carve aligned query/commit FPM buffers out of obj_mem. */
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
	    I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	if (status)
		goto exit;
	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
	    I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
	if (status)
		goto exit;
	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;
	info.hmc_fn_id = pf_info->pf_id;
	info.is_pf = TRUE;
	info.bar0 = iwdev->hw.hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = ixliw_debug;
	/* Prefer the MTU reported by the LAN driver; fall back to the vsi's. */
	l2params.mtu = (pf_info->mtu) ? pf_info->mtu : iwdev->vsi.mtu;
	for (i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++) {
		qset = pf_info->qs_handle[i];
		l2params.qs_handle_list[i] = qset;
	}
	info.vchnl_send = NULL;	/* virtual channel not supported */
	memset(&vsi_info, 0, sizeof(vsi_info));
	vsi_info.dev = &iwdev->sc_dev;
	vsi_info.back_vsi = (void *)iwdev;
	vsi_info.params = &l2params;
	vsi_info.exception_lan_queue = 1;
	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
	memset(&stats_info, 0, sizeof(stats_info));
	stats_info.fcn_id = iwdev->ldev->pf_id;
	stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
	if (!stats_info.pestat) {
		i40iw_pr_err("memory alloc fail\n");
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
	/*
	 * NOTE(review): stats_info (and its pestat allocation) is never
	 * passed anywhere — i40iw_device_init() only receives &info — and
	 * pestat is not freed on the error path either. This looks like a
	 * leak / missing vsi-stats hookup; confirm against the upstream
	 * driver's stats initialization.
	 */
	status = i40iw_device_init(&iwdev->sc_dev, &info);
exit:
	if (status) {
		kfree(iwdev->hmc_info_mem);
		iwdev->hmc_info_mem = NULL;
	}
	return status;
}
/**
 * i40iw_save_msix_info - copy msix vector information to iwarp device
 * @iwdev: iwarp device
 * @pf_info: lan device information
 *
 * Allocate iwdev msix table and copy the pf_info msix info to the table
 * Return 0 if successful, otherwise return error
 * (as written, this cannot fail and always returns 0)
 */
static enum i40iw_status_code
i40iw_save_msix_info(struct i40iw_device *iwdev, struct ixl_iw_pf *pf_info)
{
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "%s: begin\n", __func__);
	/* Cache the MSIX vector range handed over by the ixl LAN driver. */
	iwdev->msix_info.base = pf_info->iw_msix.base;
	iwdev->msix_info.count = pf_info->iw_msix.count;
	/* msix_shared: meaning inferred from the name (vectors shared with
	 * the LAN side or between AEQ/CEQ) — confirm where it is consumed. */
	iwdev->msix_shared = TRUE;
	/* itr_indx 3 — presumably the i40e "no ITR" index; confirm. */
	iwdev->msix_mapping.itr_indx = 3;
	/* First vector of the range is used for the AEQ. */
	iwdev->msix_mapping.aeq_vector = pf_info->iw_msix.base;
	return 0;
}
/*
 * ixliw_get_cpu_cnt - number of CPUs usable for interrupt routing.
 * On FreeBSD 11+ query the bus for the INTR_CPUS set, falling back to
 * mp_ncpus when the query fails; older releases always use mp_ncpus.
 */
static int
ixliw_get_cpu_cnt(struct i40iw_device *iwdev)
{
#if __FreeBSD_version >= 1100000
	device_t dev = iwdev->ldev->dev;
	cpuset_t cpu_set;
	int cpu_cnt;

	/* bus_get_cpus() returns 0 on success, an errno on failure. */
	if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set)) {
		cpu_cnt = mp_ncpus;
	} else {
		cpu_cnt = CPU_COUNT(&cpu_set);
	}
#else
	int cpu_cnt = mp_ncpus;
#endif
	return cpu_cnt;
}
/**
 * ixliw_get_int_cpu - determine which cpu core to use for msix vector,
 * uses bus_get_cpus on FreeBSD11 or newer
 * @iwdev: iwarp device
 * @msix_taken: which intr cpu to use
 * returns id of intr cpu that will be used, or msix_taken for FBSD10.3
 */
static int
ixliw_get_int_cpu(struct i40iw_device *iwdev, int msix_taken)
{
	int result = -1;
#if __FreeBSD_version >= 1100000
	device_t dev = iwdev->ldev->dev;
	cpuset_t cpu_set;
	u64 i = 0;
	int intr_cpu_cnt = -1;

	if (!bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set)) {
		/*
		 * Walk the INTR_CPUS bitmap and stop at the (msix_taken+1)-th
		 * set bit; the index wraps to 0 at mp_ncpus so the scan
		 * continues round-robin when msix_taken exceeds the number
		 * of interrupt CPUs.
		 */
		while (intr_cpu_cnt < msix_taken) {
			if ((cpu_set.__bits[i / 64] & (((u64)1) << (i % 64)))) {
				++intr_cpu_cnt;
				if (intr_cpu_cnt == msix_taken)
					break;
			}
			++i;
			if (i >= mp_ncpus)
				i = i % mp_ncpus;
		}
		result = i;
	}
#endif
	/*
	 * if FBSD10 or bus_get_cpus returns error
	 */
	if (result < 0) {
		/*
		 * ensure non-negative value will be returned
		 */
		if (iwdev->msix_count <= mp_ncpus / 2)
			/* get next even id */
			result = max(((msix_taken + 1) & 0xFFFFFFFE) % mp_ncpus, 0);
		else
			result = max((msix_taken) % mp_ncpus, 0);
	}
	return result;
}
/** | |||||
* ixliw_setup_msix - prepare msix vector infrastructure | |||||
* @iwdev: iwarp device | |||||
* | |||||
* Determine the number of interrupt vectors, alloc structures - iw_msixtbl | |||||
* and ceq_vectors | |||||
* | |||||
*/ | |||||
static int | |||||
ixliw_setup_msix(struct i40iw_device *iwdev) | |||||
{ | |||||
int cpu_id; | |||||
int inc_cpuid = 1; | |||||
int vector; | |||||
u32 i; | |||||
u32 size; | |||||
iwdev->msix_count = min(iwdev->msix_info.count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs); | |||||
iwdev->msix_count = min(iwdev->msix_count, ixliw_get_cpu_cnt(iwdev)); | |||||
/* | |||||
* if ixliw_max_ceq is different than 0, add upper constraint for the | |||||
* number of ceq. | |||||
*/ | |||||
if (ixliw_max_ceq) | |||||
iwdev->msix_count = min(iwdev->msix_count, ixliw_max_ceq); | |||||
size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; | |||||
iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL); | |||||
if (!iwdev->iw_msixtbl) | |||||
return I40IW_ERR_NO_MEMORY; | |||||
iwdev->msix_mapping.ceq_cnt = iwdev->msix_count; | |||||
size = sizeof(int) * iwdev->msix_mapping.ceq_cnt; | |||||
iwdev->msix_mapping.ceq_vector = kzalloc(size, GFP_KERNEL); | |||||
if (!iwdev->msix_mapping.ceq_vector) { | |||||
kfree(iwdev->iw_msixtbl); | |||||
return I40IW_ERR_NO_MEMORY; | |||||
} | |||||
/* | |||||
* if there is less vectors than half of cpus the chances are we will | |||||
* be able to assign them onto physical cores only | |||||
*/ | |||||
if (iwdev->msix_count < ixliw_get_cpu_cnt(iwdev) / 2) | |||||
inc_cpuid = 2; | |||||
else | |||||
inc_cpuid = 1; | |||||
vector = cpu_id = iwdev->msix_info.base; | |||||
/* avoid ceq_vector[0] being asigned to the same core on every port */ | |||||
cpu_id += (iwdev->ldev->pf_id) * inc_cpuid; | |||||
for (i = 0; i < iwdev->msix_mapping.ceq_cnt; cpu_id += inc_cpuid, i++) { | |||||
iwdev->msix_mapping.ceq_vector[i] = vector; | |||||
iwdev->iw_msixtbl[i].idx = vector++; | |||||
iwdev->iw_msixtbl[i].cpu_affinity = ixliw_get_int_cpu(iwdev, cpu_id); | |||||
} | |||||
return 0; | |||||
} | |||||
/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6
 * addresses, destroy the device queues and free the pble and the hmc objects
 *
 * The switch starts at the deepest init state reached and deliberately
 * falls through every earlier case, tearing resources down in reverse
 * order of their creation in ixliw_pf_init().
 */
static void
i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_debug(dev, I40IW_DEBUG_INIT, "state = %d", iwdev->init_state);
	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		/* Report the port down before unregistering from ibcore. */
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_del_ceqs(iwdev);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, FALSE);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, FALSE);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, TRUE);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_debug(dev, I40IW_DEBUG_ERR | I40IW_DEBUG_INIT,
		    "bad init_state = %d\n", iwdev->init_state);
		break;
	}
	/* CM core, handler registration and the hdl itself always go. */
	i40iw_cleanup_cm_core(&iwdev->cm_core);
	i40iw_del_handler(i40iw_find_handler(iwdev->ldev));
	kfree(iwdev->hdl);
}
/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @pf_info: lan device information
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code
i40iw_setup_init_state(struct i40iw_handler *hdl, struct ixl_iw_pf *pf_info)
{
	struct i40iw_device *iwdev = &hdl->device;
	enum i40iw_status_code status;

	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_INIT, "begin");
	/* Cache the PF info handed over by the ixl LAN driver. */
	memcpy(&hdl->ldev, pf_info, sizeof(*pf_info));
	iwdev->mpa_version = ixliw_mpa_version;
	iwdev->resource_profile = I40IW_HMC_PROFILE_DEFAULT;
	iwdev->max_rdma_vfs = 0;
	iwdev->ifp = pf_info->ifp;
	iwdev->vsi.mtu = pf_info->ifp->if_mtu;
	status = i40iw_save_msix_info(iwdev, pf_info);
	if (status)
		goto exit;
	/* Map register access through the PCI memory resource from ixl. */
	iwdev->dev_ctx.dev = pf_info->dev;
	iwdev->dev_ctx.mem_bus_space_tag = rman_get_bustag(pf_info->pci_mem);
	iwdev->dev_ctx.mem_bus_space_handle = rman_get_bushandle(pf_info->pci_mem);
	iwdev->dev_ctx.mem_bus_space_size = rman_get_size(pf_info->pci_mem);
	iwdev->hw.dev_context = &iwdev->dev_ctx;
	iwdev->hw.hw_addr = (u8 *)rman_get_virtual(pf_info->pci_mem);
	/* 8 KB DMA arena for aligned FPM query/commit buffers (obj_next). */
	status = i40iw_allocate_dma_mem(&iwdev->hw,
	    &iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;
	iwdev->push_mode = ixliw_push_mode;
	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&iwdev->close_wq);
	status = i40iw_initialize_dev(iwdev, pf_info);
exit:
	/*
	 * NOTE(review): on the earliest failure obj_mem was never allocated;
	 * hdl is kzalloc'd by the caller so obj_mem is all-zero here —
	 * assumed ixliw_free_dma_mem() is a no-op on a zeroed descriptor;
	 * confirm.
	 */
	if (status) {
		kfree(iwdev->iw_msixtbl);
		ixliw_free_dma_mem(&iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}
/*
 * i40iw_get_used_rsrc - determine resources used internally
 *
 * Each used_* counter is the index of the first clear bit in the
 * corresponding allocation bitmap; assumes low ids were allocated
 * contiguously during init so this equals the in-use count — confirm
 * against the resource-initialization code.
 */
static void
i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}
/*
 * ixliw_pf_init - bring up the iwarp side of one PF.
 *
 * Called by the ixl LAN driver through the registered ixl_iw_ops. Runs
 * the full init ladder inside a do/while(0) so any failure breaks out
 * to a single error path; iwdev->init_state records the deepest stage
 * completed so i40iw_deinit_device() can unwind exactly that much.
 * Returns 0 on success, ENOMEM or ERESTART on failure.
 */
static int
ixliw_pf_init(struct ixl_iw_pf *pf_info)
{
	struct i40iw_sc_dev *dev;
	struct i40iw_handler *hdl;
	struct i40iw_device *iwdev;
	int err;
	enum i40iw_status_code status;

	INIT_DBG_DEV(pf_info->dev, "begin");
#ifdef IW_IXL_DEBUG
	printf("%s:\tdevice %s\n", __func__,
	    device_get_nameunit(pf_info->dev));
	printf("%s:\tpf_id=%u\n", __func__, pf_info->pf_id);
	printf("%s:\tdev=%p, ifp=%p\n", __func__,
	    pf_info->dev, pf_info->ifp);
	printf("%s:\tmsix: base=%d, count=%d\n", __func__,
	    pf_info->iw_msix.base, pf_info->iw_msix.count);
	printf("%s:\tmtu=%u\n", __func__, pf_info->mtu);
	for (int i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++)
		printf("%s:\tqs_handle[%i]=%u\n", __func__,
		    i, pf_info->qs_handle[i]);
#endif /* IW_IXL_DEBUG */
	/* One handler per PF instance; owns the i40iw_device. */
	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
	if (!hdl)
		return ENOMEM;
	iwdev = &hdl->device;
	iwdev->hdl = hdl;
	dev = &iwdev->sc_dev;
	i40iw_debug(dev, I40IW_DEBUG_CLNT, "Entering %s, iwdev is %p\n", __func__, iwdev);
	i40iw_setup_cm_core(iwdev);
	dev->back_dev = (void *)iwdev;
	iwdev->ldev = &hdl->ldev;
	mutex_init(&iwdev->pbl_mutex);
	i40iw_add_handler(hdl);
	do {
		status = i40iw_setup_init_state(hdl, pf_info);
		if (status)
			break;
		iwdev->init_state = INITIAL_STATE;
		if (dev->is_pf)
			i40iw_wait_pe_ready(dev->hw);
		/* Control QP first: everything below is driven through it. */
		status = i40iw_create_cqp(iwdev);
		if (status)
			break;
		iwdev->init_state = CQP_CREATED;
		status = i40iw_hmc_setup(iwdev);
		if (status)
			break;
		status = i40iw_create_ccq(iwdev);
		if (status)
			break;
		iwdev->init_state = CCQ_CREATED;
		status = i40iw_initialize_ilq(iwdev);
		if (status)
			break;
		iwdev->init_state = ILQ_CREATED;
		status = i40iw_initialize_ieq(iwdev);
		if (status)
			break;
		iwdev->init_state = IEQ_CREATED;
		/* Vector layout must exist before AEQ/CEQs are created. */
		status = ixliw_setup_msix(iwdev);
		if (status)
			break;
		status = i40iw_setup_aeq(iwdev);
		if (status)
			break;
		iwdev->init_state = AEQ_CREATED;
		status = i40iw_setup_ceqs(iwdev);
		if (status)
			break;
		iwdev->init_state = CEQ_CREATED;
		status = i40iw_initialize_hw_resources(iwdev);
		if (status)
			break;
		/* Arm the CCQ so control-path completions generate events. */
		dev->ccq_ops->ccq_arm(dev->ccq);
		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
		if (status)
			break;
		iwdev->init_state = PBLE_CHUNK_MEM;
		status = i40iw_add_mac_ip(iwdev);
		if (status)
			break;
		iwdev->init_state = IP_ADDR_REGISTERED;
		err = i40iw_register_rdma_device(iwdev);
		if (err) {
			i40iw_debug(dev, I40IW_DEBUG_ERR | I40IW_DEBUG_INIT,
			    "register rdma device fail (err = %d)\n", err);
			break;
		};
		iwdev->init_state = RDMA_DEV_REGISTERED;
		/* Announce the port as active to ib consumers. */
		iwdev->iw_status = 1;
		i40iw_port_ibevent(iwdev);
		i40iw_get_used_rsrc(iwdev);
		i40iw_debug(dev, I40IW_DEBUG_CLNT,
		    "Exiting %s, iwdev is %p. Returning 0.\n", __func__, iwdev);
		INIT_DBG_DEV(iwdev->ldev->dev, "completed");
		return (0);
	} while (0);

	device_printf(iwdev->ldev->dev,
	    "%s: failed (status = %d, last completion = %d)\n",
	    __func__, status, iwdev->init_state);
	/* Unwind whatever init_state says was completed. */
	i40iw_deinit_device(iwdev);
	return (ERESTART);
}
/*
 * ixliw_pf_stop - tear down the iwarp side of one PF.
 *
 * Called by the ixl LAN driver through the registered ixl_iw_ops.
 * Returns 0 on success, ENOENT when no handler exists for this PF.
 */
static int
ixliw_pf_stop(struct ixl_iw_pf *pf_info)
{
	struct i40iw_handler *hdl;
	struct i40iw_device *iwdev;

	INIT_DBG_DEV(pf_info->dev, "begin");
	/* Look up the per-PF handler created by ixliw_pf_init(). */
	hdl = i40iw_find_handler(pf_info);
	if (!hdl) {
		printf("%s: PF handler not found\n", __func__);
		return (ENOENT);
	}
	iwdev = &hdl->device;
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CLNT, "Entering %s, iwdev is %p\n",
	    __func__, iwdev);
	/* closing flag: presumably read by in-flight work to bail early —
	 * confirm where it is consumed. */
	iwdev->closing = TRUE;
	i40iw_deinit_device(iwdev);
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CLNT, "Exiting %s, iwdev is %p\n",
	    __func__, iwdev);
	INIT_DBG_DEV(pf_info->dev, "completed");
	return (0);
}
static int | |||||
ixliw_mod_load(void) | |||||
{ | |||||
struct ixl_iw_ops iw_ops; | |||||
int err; | |||||
INIT_DEBUGOUT("begin"); | |||||
printf("iw_ixl module version: %s\n", ixliw_driver_version); | |||||
spin_lock_init(&i40iw_handler_lock); | |||||
INIT_LIST_HEAD(&i40iw_handlers); | |||||
iw_ops.init = ixliw_pf_init; | |||||
iw_ops.stop = ixliw_pf_stop; | |||||
err = ixl_iw_register(&iw_ops); | |||||
if (err) | |||||
printf("%s: failed to register to if_ixl (err=%d)\n", __func__, err); | |||||
return (err); | |||||
} | |||||
/*
 * ixliw_mod_unload - module unload: unregister from the ixl LAN driver.
 * Returns 0 on success or the error from ixl_iw_unregister().
 */
static int
ixliw_mod_unload(void)
{
	int err;

	INIT_DEBUGOUT("begin");
	err = ixl_iw_unregister();
	if (err)
		printf("%s: failed to unregister from if_ixl (err=%d)\n", __func__,
		    err);
	return (err);
}
static int | |||||
ixliw_modevent(module_t mod __unused, int event, void *arg __unused) | |||||
{ | |||||
int err = 0; | |||||
switch (event) { | |||||
case MOD_LOAD: | |||||
err = ixliw_mod_load(); | |||||
break; | |||||
case MOD_UNLOAD: | |||||
err = ixliw_mod_unload(); | |||||
break; | |||||
default: | |||||
err = EOPNOTSUPP; | |||||
break; | |||||
} | |||||
return (err); | |||||
} | |||||
/* Module registration: no extra private data beyond the event handler. */
static moduledata_t ixliw_mod = {
	"iw_ixl",
	ixliw_modevent,
	NULL
};

DECLARE_MODULE(iw_ixl, ixliw_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(iw_ixl, 1);
/* Requires the ixl LAN driver and the ibcore RDMA stack. */
MODULE_DEPEND(iw_ixl, ixl, 1, 2, 2);
MODULE_DEPEND(iw_ixl, ibcore, 1, 1, 1);