Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/iwarp/iw_ixl_utils.c
- This file was added.
/******************************************************************************* | |||||
* | |||||
* Copyright (c) 2015-2017 Intel Corporation. All rights reserved. | |||||
* | |||||
* This software is available to you under a choice of one of two | |||||
* licenses. You may choose to be licensed under the terms of the GNU | |||||
* General Public License (GPL) Version 2, available from the file | |||||
* COPYING in the main directory of this source tree, or the | |||||
* OpenFabrics.org BSD license below: | |||||
* | |||||
* Redistribution and use in source and binary forms, with or | |||||
* without modification, are permitted provided that the following | |||||
* conditions are met: | |||||
* | |||||
* - Redistributions of source code must retain the above | |||||
* copyright notice, this list of conditions and the following | |||||
* disclaimer. | |||||
* | |||||
* - Redistributions in binary form must reproduce the above | |||||
* copyright notice, this list of conditions and the following | |||||
* disclaimer in the documentation and/or other materials | |||||
* provided with the distribution. | |||||
* | |||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||||
* SOFTWARE. | |||||
* | |||||
*******************************************************************************/ | |||||
/*$FreeBSD$*/ | |||||
#include "iw_ixl_linux_wait.h" | |||||
#include "iw_ixl.h" | |||||
#if __FreeBSD_version < 1100000 | |||||
#include "iw_ixl_linux_jiffies.h" | |||||
#endif /* __FreeBSD_version < 1100000 */
#include <net/if_vlan_var.h> | |||||
#include <netinet/if_ether.h> | |||||
#include <netinet/ip.h> | |||||
#include <netinet/ip6.h> | |||||
#include <netinet/tcp.h> | |||||
/**
 * i40iw_arp_table - manage arp table
 * @iwdev: iwarp device
 * @ip_addr: ip address for device (one u32 for IPv4, four u32s for IPv6)
 * @ipv4: true if @ip_addr holds an IPv4 address
 * @mac_addr: mac address ptr (read only for I40IW_ARP_ADD)
 * @action: modify, delete or add
 *
 * Returns the matching/allocated arp table index, or -1 on failure
 * (entry already present for ADD, entry missing for RESOLVE/DELETE,
 * or unknown @action).
 */
int
i40iw_arp_table(struct i40iw_device *iwdev, u32 * ip_addr, bool ipv4,
    u8 * mac_addr, u32 action)
{
	int arp_index;
	int err;
	u32 ip[4];

	/* Normalize the address into a 4-word key; IPv4 uses word 0 only. */
	if (ipv4) {
		memset(ip, 0, sizeof(ip));
		ip[0] = *ip_addr;
	} else {
		memcpy(ip, ip_addr, sizeof(ip));
	}
	/* Linear search; arp_index == arp_table_size means "not found". */
	for (arp_index = 0; (u32) arp_index < iwdev->arp_table_size; arp_index++)
		if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
			break;
	switch (action) {
	case I40IW_ARP_ADD:
		/* Refuse duplicate entries. */
		if (arp_index != iwdev->arp_table_size)
			return -1;
		arp_index = 0;
		err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
		    iwdev->arp_table_size,
		    (u32 *) & arp_index,
		    &iwdev->next_arp_index);
		if (err)
			return err;
		memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
		ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
		break;
	case I40IW_ARP_RESOLVE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		break;
	case I40IW_ARP_DELETE:
		if (arp_index == iwdev->arp_table_size)
			return -1;
		/* Clear the slot before releasing it back to the allocator. */
		memset(iwdev->arp_table[arp_index].ip_addr, 0,
		    sizeof(iwdev->arp_table[arp_index].ip_addr));
		memset(iwdev->arp_table[arp_index].mac_addr, 0x00,
		    ETHER_ADDR_LEN);
		i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
		break;
	default:
		return -1;
	}
	return arp_index;
}
/**
 * i40iw_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 *
 * Returns a request from the preallocated free list if available,
 * otherwise falls back to a GFP_ATOMIC allocation (marked dynamic so
 * i40iw_free_cqp_request() knows to kfree it).  Returns NULL on OOM.
 */
struct i40iw_cqp_request *
i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
{
	struct i40iw_cqp_request *cqp_request = NULL;
	unsigned long flags;

	/* Try the free list first; only the list pop needs the lock. */
	spin_lock_irqsave(&cqp->req_lock, flags);
	if (!list_empty(&cqp->cqp_avail_reqs)) {
		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
		    struct i40iw_cqp_request, list);
		list_del_init(&cqp_request->list);
	}
	spin_unlock_irqrestore(&cqp->req_lock, flags);
	if (!cqp_request) {
		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = TRUE;
			INIT_LIST_HEAD(&cqp_request->list);
			init_waitqueue_head(&cqp_request->waitq);
		}
	}
	if (!cqp_request) {
		DPRINTF("CQP Request Fail: No Memory");
		return NULL;
	}
	/*
	 * Waiting requests carry two references: one for the submitter
	 * (dropped in i40iw_wait_event) and one for the completion path.
	 */
	if (wait) {
		atomic_set(&cqp_request->refcount, 2);
		cqp_request->waiting = TRUE;
	} else {
		atomic_set(&cqp_request->refcount, 1);
	}
	return cqp_request;
}
/** | |||||
* i40iw_free_cqp_request - free cqp request | |||||
* @cqp: cqp ptr | |||||
* @cqp_request: to be put back in cqp list | |||||
*/ | |||||
void | |||||
i40iw_free_cqp_request(struct i40iw_cqp *cqp, | |||||
struct i40iw_cqp_request *cqp_request) | |||||
{ | |||||
unsigned long flags; | |||||
if (cqp_request->dynamic) { | |||||
kfree(cqp_request); | |||||
} else { | |||||
cqp_request->request_done = FALSE; | |||||
cqp_request->callback_fcn = NULL; | |||||
cqp_request->waiting = FALSE; | |||||
spin_lock_irqsave(&cqp->req_lock, flags); | |||||
list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); | |||||
spin_unlock_irqrestore(&cqp->req_lock, flags); | |||||
} | |||||
} | |||||
/** | |||||
* i40iw_put_cqp_request - dec ref count and free if 0 | |||||
* @cqp: cqp ptr | |||||
* @cqp_request: to be put back in cqp list | |||||
*/ | |||||
void | |||||
i40iw_put_cqp_request(struct i40iw_cqp *cqp, | |||||
struct i40iw_cqp_request *cqp_request) | |||||
{ | |||||
if (atomic_dec_and_test(&cqp_request->refcount)) | |||||
i40iw_free_cqp_request(cqp, cqp_request); | |||||
} | |||||
/**
 * i40iw_free_qp - callback after destroy cqp completes
 * @cqp_request: cqp request for destroy qp
 * @num: not used
 *
 * Releases the PD use count, the QP's resources, and the device use
 * count taken for this QP.
 */
static void
i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
{
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
	struct i40iw_device *iwdev;
	/* Save the qp number before the resources are torn down. */
	u32 qp_num = iwqp->ibqp.qp_num;

	iwdev = iwqp->iwdev;
	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	i40iw_rem_devusecount(iwdev);
}
/**
 * i40iw_wait_event - wait for completion
 * @iwdev: iwarp device
 * @cqp_request: cqp request to wait
 *
 * Blocks until the completion path sets request_done or the timeout
 * expires.  On timeout the device is marked for reset.  Always drops
 * the submitter's reference on @cqp_request before returning.
 *
 * Returns 0 on success, -ETIMEDOUT on timeout, -EPROTO on a CQP
 * completion error.
 */
static int
i40iw_wait_event(struct i40iw_device *iwdev,
    struct i40iw_cqp_request *cqp_request)
{
	struct cqp_commands_info *info = &cqp_request->info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	int err_code = 0;
	int timeout_ret = 0;
	bool cqp_error = FALSE;

	timeout_ret = wait_event_timeout(cqp_request->waitq,
	    cqp_request->request_done,
	    I40IW_EVENT_TIMEOUT);
	if (!timeout_ret) {
		device_printf(iwdev->ldev->dev,
		    "error cqp command 0x%x timed out ret = %d\n",
		    info->cqp_cmd, timeout_ret);
		err_code = -ETIMEDOUT;
		/* Only kick off a reset once. */
		if (!iwdev->reset) {
			iwdev->reset = TRUE;
			i40iw_request_reset(iwdev);
		}
		goto done;
	}
	cqp_error = cqp_request->compl_info.error;
	if (cqp_error) {
		device_printf(iwdev->ldev->dev,
		    "error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
		    info->cqp_cmd, cqp_request->compl_info.maj_err_code,
		    cqp_request->compl_info.min_err_code);
		err_code = -EPROTO;
		goto done;
	}
done:
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return err_code;
}
/**
 * i40iw_handle_cqp_op - process cqp command
 * @iwdev: iwarp device
 * @cqp_request: cqp request to process
 *
 * Submits the command to the control QP.  If the request was created
 * in wait mode, blocks for its completion.  Frees @cqp_request on the
 * early-error paths; otherwise ownership passes to the completion /
 * wait machinery.
 */
enum i40iw_status_code
i40iw_handle_cqp_op(struct i40iw_device *iwdev,
    struct i40iw_cqp_request *cqp_request)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct cqp_commands_info *info = &cqp_request->info;
	int err_code = 0;
	enum i40iw_status_code status;

	/* Refuse new work while a device reset is pending. */
	if (iwdev->reset) {
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return I40IW_ERR_CQP_COMPL_ERROR;
	}
	status = i40iw_process_cqp_cmd(dev, info);
	if (status) {
		device_printf(iwdev->ldev->dev,
		    "error cqp command 0x%x failed\n", info->cqp_cmd);
		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
		return status;
	}
	if (cqp_request->waiting)
		err_code = i40iw_wait_event(iwdev, cqp_request);
	if (err_code)
		status = I40IW_ERR_CQP_COMPL_ERROR;
	return status;
}
/**
 * i40iw_add_devusecount - add dev refcount
 * @iwdev: dev for refcount
 */
void
i40iw_add_devusecount(struct i40iw_device *iwdev)
{
	atomic_inc(&iwdev->use_count);
}
/** | |||||
* i40iw_rem_devusecount - decrement refcount for dev | |||||
* @iwdev: device | |||||
*/ | |||||
void | |||||
i40iw_rem_devusecount(struct i40iw_device *iwdev) | |||||
{ | |||||
if (!atomic_dec_and_test(&iwdev->use_count)) | |||||
return; | |||||
wake_up(&iwdev->close_wq); | |||||
} | |||||
/**
 * i40iw_add_pdusecount - add pd refcount
 * @iwpd: pd for refcount
 */
void
i40iw_add_pdusecount(struct i40iw_pd *iwpd)
{
	atomic_inc(&iwpd->usecount);
}
/** | |||||
* i40iw_rem_pdusecount - decrement refcount for pd and free if 0 | |||||
* @iwpd: pd for refcount | |||||
* @iwdev: iwarp device | |||||
*/ | |||||
void | |||||
i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev) | |||||
{ | |||||
if (!atomic_dec_and_test(&iwpd->usecount)) | |||||
return; | |||||
i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id); | |||||
kfree(iwpd); | |||||
} | |||||
/**
 * i40iw_add_ref - add refcount for qp
 * @ibqp: iwarp qp
 */
void
i40iw_add_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;

	atomic_inc(&iwqp->refcount);
}
/**
 * i40iw_rem_ref - rem refcount for qp and free if 0
 * @ibqp: iwarp qp
 *
 * On the final reference, removes the QP from the qp table (under
 * qptable_lock so lookups and the final drop are atomic) and posts an
 * asynchronous OP_QP_DESTROY; the actual teardown happens in
 * i40iw_free_qp() when the CQP op completes.
 */
void
i40iw_rem_ref(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev;
	unsigned long flags;
	u32 qp_num;
	enum i40iw_status_code status;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!atomic_dec_and_test(&iwqp->refcount)) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		return;
	}
	/* Last reference: unpublish the QP before destroying it. */
	qp_num = iwqp->ibqp.qp_num;
	iwdev->qp_table[qp_num] = NULL;
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
	/* Fire-and-forget destroy; i40iw_free_qp runs on completion. */
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, FALSE);
	if (!cqp_request)
		return;
	cqp_request->callback_fcn = i40iw_free_qp;
	cqp_request->param = (void *)&iwqp->sc_qp;
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t) cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = TRUE;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		device_printf(iwdev->ldev->dev, "CQP-OP Destroy QP fail");
}
/** | |||||
* i40iw_get_qp - get qp address | |||||
* @device: iwarp device | |||||
* @qpn: qp number | |||||
*/ | |||||
struct ib_qp * | |||||
i40iw_get_qp(struct ib_device *device, int qpn) | |||||
{ | |||||
struct i40iw_device *iwdev = to_iwdev(device); | |||||
if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp)) | |||||
return NULL; | |||||
return &iwdev->qp_table[qpn]->ibqp; | |||||
} | |||||
/** | |||||
* i40iw_debug_buf - print debug msg and buffer is mask set | |||||
* @dev: hardware control device structure | |||||
* @mask: mask to compare if to print debug buffer | |||||
* @buf: points buffer addr | |||||
* @size: saize of buffer to print | |||||
*/ | |||||
void | |||||
i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask, | |||||
char *desc, u64 * buf, u32 size) | |||||
{ | |||||
u32 i; | |||||
if (!(dev->debug_mask & mask)) | |||||
return; | |||||
i40iw_debug(dev, mask, "%s\n", desc); | |||||
i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf, | |||||
(unsigned long long)i40iw_get_virt_to_phy(buf)); | |||||
for (i = 0; i < size; i += 8) | |||||
i40iw_debug(dev, mask, "index %03d val: %016lx\n", i, buf[i / 8]); | |||||
} | |||||
/**
 * i40iw_get_hw_addr - return hw addr
 * @par: points to shared dev (struct i40iw_sc_dev passed as void *)
 */
u8 *
i40iw_get_hw_addr(void *par)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;

	return dev->hw->hw_addr;
}
/** | |||||
* i40iw_remove_head - return head entry and remove from list | |||||
* @list: list for entry | |||||
*/ | |||||
void * | |||||
i40iw_remove_head(struct list_head *list) | |||||
{ | |||||
struct list_head *entry; | |||||
if (list_empty(list)) | |||||
return NULL; | |||||
entry = (void *)list->next; | |||||
list_del(entry); | |||||
return (void *)entry; | |||||
} | |||||
/* Read a 32-bit device register at byte offset @reg. */
inline u32
i40iw_rd32(struct i40iw_dev_ctx *dev_ctx, u32 reg)
{
	KASSERT(reg < dev_ctx->mem_bus_space_size,
	    ("iw_ixl: register offset %#jx too large (max is %#jx)",
	    (uintmax_t) reg, (uintmax_t) dev_ctx->mem_bus_space_size));
	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
	    dev_ctx->mem_bus_space_handle, reg));
}
/* Write a 32-bit device register at byte offset @reg. */
inline void
i40iw_wr32(struct i40iw_dev_ctx *dev_ctx, u32 reg, u32 value)
{
	KASSERT(reg < dev_ctx->mem_bus_space_size,
	    ("iw_ixl: register offset %#jx too large (max is %#jx)",
	    (uintmax_t) reg, (uintmax_t) dev_ctx->mem_bus_space_size));
	bus_space_write_4(dev_ctx->mem_bus_space_tag,
	    dev_ctx->mem_bus_space_handle, reg, value);
}
/* Read a 64-bit device register at byte offset @reg. */
inline u64
i40iw_rd64(struct i40iw_dev_ctx *dev_ctx, u32 reg)
{
	KASSERT(reg < dev_ctx->mem_bus_space_size,
	    ("iw_ixl: register offset %#jx too large (max is %#jx)",
	    (uintmax_t) reg, (uintmax_t) dev_ctx->mem_bus_space_size));
	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
	    dev_ctx->mem_bus_space_handle, reg));
}
/* Write a 64-bit device register at byte offset @reg. */
inline void
i40iw_wr64(struct i40iw_dev_ctx *dev_ctx, u32 reg, u64 value)
{
	KASSERT(reg < dev_ctx->mem_bus_space_size,
	    ("iw_ixl: register offset %#jx too large (max is %#jx)",
	    (uintmax_t) reg, (uintmax_t) dev_ctx->mem_bus_space_size));
	bus_space_write_8(dev_ctx->mem_bus_space_tag,
	    dev_ctx->mem_bus_space_handle, reg, value);
}
/*
 * bus_dmamap_load(9) callback: store the single segment's bus address
 * into the bus_addr_t pointed to by @arg.  The tag is created with
 * nsegments == 1, so only segs[0] is meaningful.
 */
static void
i40iw_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}
/**
 * i40iw_allocate_dma_mem - Memory alloc helper fn
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 *
 * Creates a single-segment DMA tag, allocates zeroed DMA-able memory,
 * and loads it to obtain the bus address (stored in mem->pa via
 * i40iw_dmamap_cb).  On failure, unwinds via the goto chain and NULLs
 * mem->tag/mem->map so ixliw_free_dma_mem() is safe to call.
 *
 * Returns 0 on success or I40IW_ERR_NO_MEMORY on any failure.
 */
enum i40iw_status_code
i40iw_allocate_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem,
    u64 size, u32 alignment)
{
	struct i40iw_dev_ctx *dev_ctx =
	    (struct i40iw_dev_ctx *)hw->dev_context;
	device_t dev = dev_ctx->dev;
	int ret;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,	/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    size,		/* maxsize */
	    1,			/* nsegments */
	    size,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockfuncarg */
	    &mem->tag);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
		    __func__, ret);
		goto fail_0;
	}
	ret = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
		    __func__, ret);
		goto fail_1;
	}
	ret = bus_dmamap_load(mem->tag, mem->map, mem->va, size,
	    i40iw_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
		    __func__, ret);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
fail_2:
	bus_dmamem_free(mem->tag, mem->va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;
	return (I40IW_ERR_NO_MEMORY);
}
/** | |||||
* ixliw_free_dma_mem - Memory free helper fn | |||||
* @mem: ptr to mem struct to free | |||||
*/ | |||||
void | |||||
ixliw_free_dma_mem(struct i40iw_dma_mem *mem) | |||||
{ | |||||
if (!mem) | |||||
return; | |||||
bus_dmamap_sync(mem->tag, mem->map, | |||||
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); | |||||
bus_dmamap_unload(mem->tag, mem->map); | |||||
if (!mem->va) | |||||
return; | |||||
bus_dmamem_free(mem->tag, mem->va, mem->map); | |||||
bus_dma_tag_destroy(mem->tag); | |||||
} | |||||
/** | |||||
* i40iw_allocate_virt_mem - virtual memory alloc helper fn | |||||
* @hw: pointer to the HW structure | |||||
* @mem: ptr to mem struct to fill out | |||||
* @size: size of memory requested | |||||
*/ | |||||
enum i40iw_status_code | |||||
i40iw_allocate_virt_mem(struct i40iw_hw *hw, struct i40iw_virt_mem *mem, | |||||
u32 size) | |||||
{ | |||||
if (!mem) | |||||
return (I40IW_ERR_PARAM); | |||||
mem->size = size; | |||||
mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); | |||||
if (mem->va) | |||||
return (0); | |||||
else | |||||
return (I40IW_ERR_NO_MEMORY); | |||||
} | |||||
/** | |||||
* i40iw_free_virt_mem - virtual memory free helper fn | |||||
* @hw: pointer to the HW structure | |||||
* @mem: ptr to mem struct to free | |||||
*/ | |||||
enum i40iw_status_code | |||||
i40iw_free_virt_mem(struct i40iw_hw *hw, struct i40iw_virt_mem *mem) | |||||
{ | |||||
if (!mem) | |||||
return (I40IW_ERR_PARAM); | |||||
free(mem->va, M_DEVBUF); | |||||
return (0); | |||||
} | |||||
/**
 * i40iw_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 *
 * Issues OP_UPDATE_PE_SDS in wait mode and returns its status.
 */
enum i40iw_status_code
i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
    struct i40iw_update_sds_info *sdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, TRUE);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	/* Snapshot the caller's SD info into the request. */
	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
	    sizeof(cqp_info->in.u.update_pe_sds.info));
	cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
	cqp_info->post_sq = 1;
	cqp_info->in.u.update_pe_sds.dev = dev;
	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t) cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		DPRINTF("CQP-OP Update SD's fail");
	return status;
}
/** | |||||
* i40iw_cqp_cq_create_cmd - create a cq for the cqp | |||||
* @dev: device pointer | |||||
* @cq: pointer to created cq | |||||
*/ | |||||
enum i40iw_status_code | |||||
i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, | |||||
struct i40iw_sc_cq *cq) | |||||
{ | |||||
struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; | |||||
struct i40iw_cqp *iwcqp = &iwdev->cqp; | |||||
struct i40iw_cqp_request *cqp_request; | |||||
struct cqp_commands_info *cqp_info; | |||||
enum i40iw_status_code status; | |||||
cqp_request = i40iw_get_cqp_request(iwcqp, TRUE); | |||||
if (!cqp_request) | |||||
return I40IW_ERR_NO_MEMORY; | |||||
cqp_info = &cqp_request->info; | |||||
cqp_info->cqp_cmd = OP_CQ_CREATE; | |||||
cqp_info->post_sq = 1; | |||||
cqp_info->in.u.cq_create.cq = cq; | |||||
cqp_info->in.u.cq_create.scratch = (uintptr_t) cqp_request; | |||||
status = i40iw_handle_cqp_op(iwdev, cqp_request); | |||||
if (status) | |||||
i40iw_pr_err("CQP-OP Create QP fail"); | |||||
return status; | |||||
} | |||||
/**
 * i40iw_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 *
 * Issues OP_QP_CREATE (next state RTS) in wait mode and returns its
 * status.
 */
enum i40iw_status_code
i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
    struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_create_qp_info *qp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, TRUE);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;
	/* Start from a clean create descriptor. */
	memset(qp_info, 0, sizeof(*qp_info));
	qp_info->cq_num_valid = TRUE;
	qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;
	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t) cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP QP create fail");
	return status;
}
/** | |||||
* i40iw_cqp_cq_destroy_cmd - destroy the cqp cq | |||||
* @dev: device pointer | |||||
* @cq: pointer to cq | |||||
*/ | |||||
void | |||||
i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq) | |||||
{ | |||||
struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; | |||||
i40iw_cq_wq_destroy(iwdev, cq); | |||||
} | |||||
/**
 * i40iw_cqp_qp_destroy_cmd - destroy the cqp
 * @dev: device pointer
 * @qp: pointer to qp
 *
 * Issues OP_QP_DESTROY in wait mode; errors are only logged.
 */
void
i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(iwcqp, TRUE);
	if (!cqp_request)
		return;
	cqp_info = &cqp_request->info;
	memset(cqp_info, 0, sizeof(*cqp_info));
	cqp_info->cqp_cmd = OP_QP_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_destroy.qp = qp;
	cqp_info->in.u.qp_destroy.scratch = (uintptr_t) cqp_request;
	cqp_info->in.u.qp_destroy.remove_hash_idx = TRUE;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP QP_DESTROY fail");
}
/** | |||||
* i40iw_term_modify_qp - modify qp for term message | |||||
* @qp: hardware control qp | |||||
* @next_state: qp's next state | |||||
* @term: terminate code | |||||
* @term_len: length | |||||
*/ | |||||
void | |||||
i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len) | |||||
{ | |||||
struct i40iw_qp *iwqp; | |||||
iwqp = (struct i40iw_qp *)qp->back_qp; | |||||
i40iw_next_iw_state(iwqp, next_state, 0, term, term_len); | |||||
}; | |||||
/**
 * i40iw_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 *
 * Runs the post-terminate transition exactly once per QP (guarded by
 * I40IW_TERM_DONE under the qp lock): moves the QP to ERROR, or to
 * CLOSING if the terminate timed out, then starts CM disconnect.
 */
void
i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
{
	struct i40iw_qp *iwqp;
	unsigned long flags;
	u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
	u8 hte = 0;
	bool first_time;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	spin_lock_irqsave(&iwqp->lock, flags);
	/* Remember whether the hash-table entry must be removed. */
	if (iwqp->hte_added) {
		iwqp->hte_added = 0;
		hte = 1;
	}
	first_time = !(qp->term_flags & I40IW_TERM_DONE);
	qp->term_flags |= I40IW_TERM_DONE;
	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (first_time) {
		/* Cancel the timer unless it is what got us here. */
		if (!timeout_occurred)
			i40iw_terminate_del_timer(qp);
		else
			next_iwarp_state = I40IW_QP_STATE_CLOSING;
		i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
		i40iw_cm_disconn(iwqp);
	}
}
/**
 * i40iw_terminate_timeout - timeout happened
 * @context: points to iwarp qp
 *
 * Timer callback: complete the terminate as timed out and drop the
 * reference taken by i40iw_terminate_start_timer().
 */
static void
i40iw_terminate_timeout(unsigned long context)
{
	struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

	i40iw_terminate_done(qp, 1);
	i40iw_rem_ref(&iwqp->ibqp);
}
/**
 * i40iw_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 *
 * Takes a QP reference (released by the timer callback or by
 * i40iw_terminate_del_timer) and arms a one-second terminate timer.
 */
void
i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
{
	struct i40iw_qp *iwqp;

	iwqp = (struct i40iw_qp *)qp->back_qp;
	i40iw_add_ref(&iwqp->ibqp);
	init_timer(&iwqp->terminate_timer);
	iwqp->terminate_timer.function = i40iw_terminate_timeout;
	/* One second from now. */
	iwqp->terminate_timer.expires = jiffies + HZ;
	iwqp->terminate_timer.data = (unsigned long)iwqp;
	add_timer(&iwqp->terminate_timer);
}
/** | |||||
* i40iw_terminate_del_timer - delete terminate timeout | |||||
* @qp: hardware control qp | |||||
*/ | |||||
void | |||||
i40iw_terminate_del_timer(struct i40iw_sc_qp *qp) | |||||
{ | |||||
struct i40iw_qp *iwqp; | |||||
int ret; | |||||
iwqp = (struct i40iw_qp *)qp->back_qp; | |||||
ret = del_timer(&iwqp->terminate_timer); | |||||
if (ret) | |||||
i40iw_rem_ref(&iwqp->ibqp); | |||||
} | |||||
/**
 * i40iw_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 *
 * Flushes the QP's work queues with an MPA-CRC async event attached.
 */
void
i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_qp_flush_info info;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
	memset(&info, 0, sizeof(info));
	info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
	info.generate_ae = TRUE;
	info.ae_source = 0x3;
	(void)i40iw_hw_flush_wqes(iwdev, qp, &info, FALSE);
}
/*
 * No hash descriptor is needed on FreeBSD (calculate_crc32c() is used
 * directly); this stub exists to satisfy the shared-code interface.
 */
enum i40iw_status_code
i40iw_init_hash_desc(void **desc)
{
	return 0;
}
/* Stub counterpart of i40iw_init_hash_desc(); nothing to free. */
void
i40iw_free_hash_desc(void *desc)
{
	return;
}
/** | |||||
* i40iw_ieq_check_mpacrc - check if mpa crc is OK | |||||
* @desc: desc for hash | |||||
* @addr: address of buffer for crc | |||||
* @length: length of buffer | |||||
* @value: value to be compared | |||||
*/ | |||||
enum i40iw_status_code | |||||
i40iw_ieq_check_mpacrc(void *desc, void *addr, u32 length, | |||||
u32 value) | |||||
{ | |||||
u32 crc = calculate_crc32c(0xffffffff, addr, length) ^ 0xffffffff; | |||||
enum i40iw_status_code ret_code = 0; | |||||
printf("%s: begin", __func__); | |||||
if (crc != value) { | |||||
i40iw_pr_err("mpa crc check fail\n"); | |||||
ret_code = I40IW_ERR_MPA_CRC; | |||||
} | |||||
printf("%s: result crc=%x value=%x", __func__, crc, value); | |||||
return ret_code; | |||||
} | |||||
/**
 * i40iw_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 *
 * Extracts the (src addr, src port, dst addr, dst port) quad from the
 * buffer's IP/TCP headers, looks up the matching CM node, and returns
 * its QP, or NULL when no connection matches.
 */
struct i40iw_sc_qp *
i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_node *cm_node;
	struct ip6_hdr *ip6h;
	struct ip *iph = (struct ip *)buf->iph;
	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
	u32 loc_addr[4], rem_addr[4];
	u16 loc_port, rem_port;

	/* Local = our destination, remote = the sender. */
	if (iph->ip_v == 4) {
		memset(loc_addr, 0, sizeof(loc_addr));
		loc_addr[0] = ntohl(iph->ip_dst.s_addr);
		memset(rem_addr, 0, sizeof(rem_addr));
		rem_addr[0] = ntohl(iph->ip_src.s_addr);
	} else {
		ip6h = (struct ip6_hdr *)buf->iph;
		i40iw_copy_ip_ntohl(loc_addr, (__be32 *) & ip6h->ip6_dst);
		i40iw_copy_ip_ntohl(rem_addr, (__be32 *) & ip6h->ip6_src);
	}
	loc_port = ntohs(tcph->th_sport);
	rem_port = ntohs(tcph->th_dport);
	cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
	    loc_addr, FALSE);
	if (!cm_node)
		return NULL;
	iwqp = cm_node->iwqp;
	return &iwqp->sc_qp;
}
/**
 * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @length: length of buffer
 * @seqnum: seq number for tcp
 *
 * Rewrites the IP total-length and TCP sequence-number fields of the
 * packet held in @buf->mem after the IEQ has adjusted the payload.
 */
void
i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
{
	struct tcphdr *tcph;
	struct ip *iph;
	u16 iphlen;
	u16 packetsize;
	u8 *addr = (u8 *) buf->mem.va;

	/* Fixed header sizes: 20 bytes IPv4, 40 bytes IPv6 (no options). */
	iphlen = (buf->ipv4) ? 20 : 40;
	iph = (struct ip *)(addr + buf->maclen);
	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
	packetsize = length + buf->tcphlen + iphlen;
	iph->ip_len = htons(packetsize);
	tcph->th_seq = htonl(seqnum);
}
/**
 * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: to get information (l3proto, vlan, payload_len are inputs)
 * @buf: puda buffer whose header offsets/lengths get filled in
 *
 * Parses Ethernet(+VLAN)/IP/TCP headers in @buf->mem and populates
 * buf->{maclen,iph,tcph,ipv4,totallen,tcphlen,datalen,data,hdrlen,
 * seqnum,vlan_id}.  Returns I40IW_ERR_INVALID_SIZE if the reported
 * payload is shorter than the headers claim, otherwise 0.
 */
enum i40iw_status_code
i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
    struct i40iw_puda_buf *buf)
{
	struct ip *iph;
	struct ip6_hdr *ip6h;
	struct tcphdr *tcph;
	struct ether_header *ethh = (struct ether_header *)buf->mem.va;
	u16 iphlen;
	u16 pkt_len;
	u8 *mem = (u8 *) buf->mem.va;

	if (ethh->ether_type == htons(ETH_P_8021Q)) {
		info->vlan_valid = TRUE;
		buf->vlan_id = ntohs(((struct ether_vlan_header *)ethh)->evl_tag) & EVL_VLID_MASK;
	}
	/* 18-byte MAC header with a VLAN tag, 14 without. */
	buf->maclen = (info->vlan_valid) ? 18 : 14;
	/* l3proto nonzero means IPv6 (40-byte header), else IPv4 (20). */
	iphlen = (info->l3proto) ? 40 : 20;
	buf->ipv4 = (info->l3proto) ? FALSE : TRUE;
	buf->iph = mem + buf->maclen;
	iph = (struct ip *)buf->iph;
	buf->tcph = buf->iph + iphlen;
	tcph = (struct tcphdr *)buf->tcph;
	if (buf->ipv4) {
		/* IPv4 ip_len includes the IP header. */
		pkt_len = ntohs(iph->ip_len);
	} else {
		/* IPv6 payload length excludes the IP header; add it back. */
		ip6h = (struct ip6_hdr *)buf->iph;
		pkt_len = ntohs(ip6h->ip6_plen) + iphlen;
	}
	buf->totallen = pkt_len + buf->maclen;
	if (info->payload_len < buf->totallen) {
		i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n",
		    info->payload_len, buf->totallen);
		return I40IW_ERR_INVALID_SIZE;
	}
	/* th_off is in 32-bit words. */
	buf->tcphlen = (tcph->th_off) << 2;
	buf->datalen = pkt_len - iphlen - buf->tcphlen;
	buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
	buf->seqnum = ntohl(tcph->th_seq);
	return 0;
}