Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/ixlvc.c
/****************************************************************************** | /****************************************************************************** | ||||
Copyright (c) 2013-2015, Intel Corporation | Copyright (c) 2013-2017, Intel Corporation | ||||
All rights reserved. | All rights reserved. | ||||
Redistribution and use in source and binary forms, with or without | Redistribution and use in source and binary forms, with or without | ||||
modification, are permitted provided that the following conditions are met: | modification, are permitted provided that the following conditions are met: | ||||
1. Redistributions of source code must retain the above copyright notice, | 1. Redistributions of source code must retain the above copyright notice, | ||||
this list of conditions and the following disclaimer. | this list of conditions and the following disclaimer. | ||||
Show All 31 Lines | |||||
#include "i40e_prototype.h" | #include "i40e_prototype.h" | ||||
/* busy wait delay in msec */ | /* busy wait delay in msec */ | ||||
#define IXLV_BUSY_WAIT_DELAY 10 | #define IXLV_BUSY_WAIT_DELAY 10 | ||||
#define IXLV_BUSY_WAIT_COUNT 50 | #define IXLV_BUSY_WAIT_COUNT 50 | ||||
static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t, | static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t, | ||||
enum i40e_status_code); | enum virtchnl_status_code); | ||||
static void ixl_vc_process_next(struct ixl_vc_mgr *mgr); | static void ixl_vc_process_next(struct ixl_vc_mgr *mgr); | ||||
static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr); | static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr); | ||||
static void ixl_vc_send_current(struct ixl_vc_mgr *mgr); | static void ixl_vc_send_current(struct ixl_vc_mgr *mgr); | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
/* | /* | ||||
** Validate VF messages | ** Validate VF messages | ||||
*/ | */ | ||||
static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode, | static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode, | ||||
u8 *msg, u16 msglen) | u8 *msg, u16 msglen) | ||||
{ | { | ||||
bool err_msg_format = false; | bool err_msg_format = false; | ||||
int valid_len; | int valid_len; | ||||
/* Validate message length. */ | /* Validate message length. */ | ||||
switch (v_opcode) { | switch (v_opcode) { | ||||
case I40E_VIRTCHNL_OP_VERSION: | case VIRTCHNL_OP_VERSION: | ||||
valid_len = sizeof(struct i40e_virtchnl_version_info); | valid_len = sizeof(struct virtchnl_version_info); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_RESET_VF: | case VIRTCHNL_OP_RESET_VF: | ||||
valid_len = 0; | valid_len = 0; | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: | case VIRTCHNL_OP_GET_VF_RESOURCES: | ||||
/* Valid length in api v1.0 is 0, v1.1 is 4 */ | /* Valid length in api v1.0 is 0, v1.1 is 4 */ | ||||
valid_len = 4; | valid_len = 4; | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: | case VIRTCHNL_OP_CONFIG_TX_QUEUE: | ||||
valid_len = sizeof(struct i40e_virtchnl_txq_info); | valid_len = sizeof(struct virtchnl_txq_info); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: | case VIRTCHNL_OP_CONFIG_RX_QUEUE: | ||||
valid_len = sizeof(struct i40e_virtchnl_rxq_info); | valid_len = sizeof(struct virtchnl_rxq_info); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: | case VIRTCHNL_OP_CONFIG_VSI_QUEUES: | ||||
valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); | valid_len = sizeof(struct virtchnl_vsi_queue_config_info); | ||||
if (msglen >= valid_len) { | if (msglen >= valid_len) { | ||||
struct i40e_virtchnl_vsi_queue_config_info *vqc = | struct virtchnl_vsi_queue_config_info *vqc = | ||||
(struct i40e_virtchnl_vsi_queue_config_info *)msg; | (struct virtchnl_vsi_queue_config_info *)msg; | ||||
valid_len += (vqc->num_queue_pairs * | valid_len += (vqc->num_queue_pairs * | ||||
sizeof(struct | sizeof(struct | ||||
i40e_virtchnl_queue_pair_info)); | virtchnl_queue_pair_info)); | ||||
if (vqc->num_queue_pairs == 0) | if (vqc->num_queue_pairs == 0) | ||||
err_msg_format = true; | err_msg_format = true; | ||||
} | } | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: | case VIRTCHNL_OP_CONFIG_IRQ_MAP: | ||||
valid_len = sizeof(struct i40e_virtchnl_irq_map_info); | valid_len = sizeof(struct virtchnl_irq_map_info); | ||||
if (msglen >= valid_len) { | if (msglen >= valid_len) { | ||||
struct i40e_virtchnl_irq_map_info *vimi = | struct virtchnl_irq_map_info *vimi = | ||||
(struct i40e_virtchnl_irq_map_info *)msg; | (struct virtchnl_irq_map_info *)msg; | ||||
valid_len += (vimi->num_vectors * | valid_len += (vimi->num_vectors * | ||||
sizeof(struct i40e_virtchnl_vector_map)); | sizeof(struct virtchnl_vector_map)); | ||||
if (vimi->num_vectors == 0) | if (vimi->num_vectors == 0) | ||||
err_msg_format = true; | err_msg_format = true; | ||||
} | } | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ENABLE_QUEUES: | case VIRTCHNL_OP_ENABLE_QUEUES: | ||||
case I40E_VIRTCHNL_OP_DISABLE_QUEUES: | case VIRTCHNL_OP_DISABLE_QUEUES: | ||||
valid_len = sizeof(struct i40e_virtchnl_queue_select); | valid_len = sizeof(struct virtchnl_queue_select); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: | case VIRTCHNL_OP_ADD_ETH_ADDR: | ||||
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: | case VIRTCHNL_OP_DEL_ETH_ADDR: | ||||
valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); | valid_len = sizeof(struct virtchnl_ether_addr_list); | ||||
if (msglen >= valid_len) { | if (msglen >= valid_len) { | ||||
struct i40e_virtchnl_ether_addr_list *veal = | struct virtchnl_ether_addr_list *veal = | ||||
(struct i40e_virtchnl_ether_addr_list *)msg; | (struct virtchnl_ether_addr_list *)msg; | ||||
valid_len += veal->num_elements * | valid_len += veal->num_elements * | ||||
sizeof(struct i40e_virtchnl_ether_addr); | sizeof(struct virtchnl_ether_addr); | ||||
if (veal->num_elements == 0) | if (veal->num_elements == 0) | ||||
err_msg_format = true; | err_msg_format = true; | ||||
} | } | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ADD_VLAN: | case VIRTCHNL_OP_ADD_VLAN: | ||||
case I40E_VIRTCHNL_OP_DEL_VLAN: | case VIRTCHNL_OP_DEL_VLAN: | ||||
valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); | valid_len = sizeof(struct virtchnl_vlan_filter_list); | ||||
if (msglen >= valid_len) { | if (msglen >= valid_len) { | ||||
struct i40e_virtchnl_vlan_filter_list *vfl = | struct virtchnl_vlan_filter_list *vfl = | ||||
(struct i40e_virtchnl_vlan_filter_list *)msg; | (struct virtchnl_vlan_filter_list *)msg; | ||||
valid_len += vfl->num_elements * sizeof(u16); | valid_len += vfl->num_elements * sizeof(u16); | ||||
if (vfl->num_elements == 0) | if (vfl->num_elements == 0) | ||||
err_msg_format = true; | err_msg_format = true; | ||||
} | } | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: | case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: | ||||
valid_len = sizeof(struct i40e_virtchnl_promisc_info); | valid_len = sizeof(struct virtchnl_promisc_info); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_GET_STATS: | case VIRTCHNL_OP_GET_STATS: | ||||
valid_len = sizeof(struct i40e_virtchnl_queue_select); | valid_len = sizeof(struct virtchnl_queue_select); | ||||
break; | break; | ||||
/* These are always errors coming from the VF. */ | /* These are always errors coming from the VF. */ | ||||
case I40E_VIRTCHNL_OP_EVENT: | case VIRTCHNL_OP_EVENT: | ||||
case I40E_VIRTCHNL_OP_UNKNOWN: | case VIRTCHNL_OP_UNKNOWN: | ||||
default: | default: | ||||
return EPERM; | return EPERM; | ||||
break; | break; | ||||
} | } | ||||
/* few more checks */ | /* few more checks */ | ||||
if ((valid_len != msglen) || (err_msg_format)) | if ((valid_len != msglen) || (err_msg_format)) | ||||
return EINVAL; | return EINVAL; | ||||
else | else | ||||
return 0; | return 0; | ||||
} | } | ||||
#endif | #endif | ||||
/* | /* | ||||
** ixlv_send_pf_msg | ** ixlv_send_pf_msg | ||||
** | ** | ||||
** Send message to PF and print status if failure. | ** Send message to PF and print status if failure. | ||||
*/ | */ | ||||
static int | static int | ||||
ixlv_send_pf_msg(struct ixlv_sc *sc, | ixlv_send_pf_msg(struct ixlv_sc *sc, | ||||
enum i40e_virtchnl_ops op, u8 *msg, u16 len) | enum virtchnl_ops op, u8 *msg, u16 len) | ||||
{ | { | ||||
struct i40e_hw *hw = &sc->hw; | struct i40e_hw *hw = &sc->hw; | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
i40e_status err; | i40e_status err; | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
/* | /* | ||||
** Pre-validating messages to the PF | ** Pre-validating messages to the PF | ||||
Show All 21 Lines | |||||
** | ** | ||||
** Send API version admin queue message to the PF. The reply is not checked | ** Send API version admin queue message to the PF. The reply is not checked | ||||
** in this function. Returns 0 if the message was successfully | ** in this function. Returns 0 if the message was successfully | ||||
** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. | ** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. | ||||
*/ | */ | ||||
int | int | ||||
ixlv_send_api_ver(struct ixlv_sc *sc) | ixlv_send_api_ver(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_version_info vvi; | struct virtchnl_version_info vvi; | ||||
vvi.major = I40E_VIRTCHNL_VERSION_MAJOR; | vvi.major = VIRTCHNL_VERSION_MAJOR; | ||||
vvi.minor = I40E_VIRTCHNL_VERSION_MINOR; | vvi.minor = VIRTCHNL_VERSION_MINOR; | ||||
return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION, | return ixlv_send_pf_msg(sc, VIRTCHNL_OP_VERSION, | ||||
(u8 *)&vvi, sizeof(vvi)); | (u8 *)&vvi, sizeof(vvi)); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_verify_api_ver | ** ixlv_verify_api_ver | ||||
** | ** | ||||
** Compare API versions with the PF. Must be called after admin queue is | ** Compare API versions with the PF. Must be called after admin queue is | ||||
** initialized. Returns 0 if API versions match, EIO if | ** initialized. Returns 0 if API versions match, EIO if | ||||
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty. | ** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty. | ||||
*/ | */ | ||||
int | int | ||||
ixlv_verify_api_ver(struct ixlv_sc *sc) | ixlv_verify_api_ver(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_version_info *pf_vvi; | struct virtchnl_version_info *pf_vvi; | ||||
struct i40e_hw *hw = &sc->hw; | struct i40e_hw *hw = &sc->hw; | ||||
struct i40e_arq_event_info event; | struct i40e_arq_event_info event; | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
i40e_status err; | i40e_status err; | ||||
int retries = 0; | int retries = 0; | ||||
event.buf_len = IXL_AQ_BUF_SZ; | event.buf_len = IXL_AQ_BUF_SZ; | ||||
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); | event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); | ||||
Show All 11 Lines | for (;;) { | ||||
err = i40e_clean_arq_element(hw, &event, NULL); | err = i40e_clean_arq_element(hw, &event, NULL); | ||||
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) | if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) | ||||
continue; | continue; | ||||
else if (err) { | else if (err) { | ||||
err = EIO; | err = EIO; | ||||
goto out_alloc; | goto out_alloc; | ||||
} | } | ||||
if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) != | if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != | ||||
I40E_VIRTCHNL_OP_VERSION) { | VIRTCHNL_OP_VERSION) { | ||||
DDPRINTF(dev, "Received unexpected op response: %d\n", | DDPRINTF(dev, "Received unexpected op response: %d\n", | ||||
le32toh(event.desc.cookie_high)); | le32toh(event.desc.cookie_high)); | ||||
/* Don't stop looking for expected response */ | /* Don't stop looking for expected response */ | ||||
continue; | continue; | ||||
} | } | ||||
err = (i40e_status)le32toh(event.desc.cookie_low); | err = (i40e_status)le32toh(event.desc.cookie_low); | ||||
if (err) { | if (err) { | ||||
err = EIO; | err = EIO; | ||||
goto out_alloc; | goto out_alloc; | ||||
} else | } else | ||||
break; | break; | ||||
} | } | ||||
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; | pf_vvi = (struct virtchnl_version_info *)event.msg_buf; | ||||
if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || | if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || | ||||
((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && | ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && | ||||
(pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) { | (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) { | ||||
device_printf(dev, "Critical PF/VF API version mismatch!\n"); | device_printf(dev, "Critical PF/VF API version mismatch!\n"); | ||||
err = EIO; | err = EIO; | ||||
} else | } else | ||||
sc->pf_version = pf_vvi->minor; | sc->pf_version = pf_vvi->minor; | ||||
/* Log PF/VF api versions */ | /* Log PF/VF api versions */ | ||||
device_printf(dev, "PF API %d.%d / VF API %d.%d\n", | device_printf(dev, "PF API %d.%d / VF API %d.%d\n", | ||||
pf_vvi->major, pf_vvi->minor, | pf_vvi->major, pf_vvi->minor, | ||||
I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR); | VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); | ||||
out_alloc: | out_alloc: | ||||
free(event.msg_buf, M_DEVBUF); | free(event.msg_buf, M_DEVBUF); | ||||
out: | out: | ||||
return (err); | return (err); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_send_vf_config_msg | ** ixlv_send_vf_config_msg | ||||
** | ** | ||||
** Send VF configuration request admin queue message to the PF. The reply | ** Send VF configuration request admin queue message to the PF. The reply | ||||
** is not checked in this function. Returns 0 if the message was | ** is not checked in this function. Returns 0 if the message was | ||||
** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. | ** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. | ||||
*/ | */ | ||||
int | int | ||||
ixlv_send_vf_config_msg(struct ixlv_sc *sc) | ixlv_send_vf_config_msg(struct ixlv_sc *sc) | ||||
{ | { | ||||
u32 caps; | u32 caps; | ||||
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | | caps = VIRTCHNL_VF_OFFLOAD_L2 | | ||||
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF | | VIRTCHNL_VF_OFFLOAD_RSS_PF | | ||||
I40E_VIRTCHNL_VF_OFFLOAD_VLAN; | VIRTCHNL_VF_OFFLOAD_VLAN; | ||||
if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) | if (sc->pf_version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) | ||||
return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, | return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, | ||||
NULL, 0); | NULL, 0); | ||||
else | else | ||||
return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, | return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, | ||||
(u8 *)&caps, sizeof(caps)); | (u8 *)&caps, sizeof(caps)); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_get_vf_config | ** ixlv_get_vf_config | ||||
** | ** | ||||
** Get VF configuration from PF and populate hw structure. Must be called after | ** Get VF configuration from PF and populate hw structure. Must be called after | ||||
** admin queue is initialized. Busy waits until response is received from PF, | ** admin queue is initialized. Busy waits until response is received from PF, | ||||
** with maximum timeout. Response from PF is returned in the buffer for further | ** with maximum timeout. Response from PF is returned in the buffer for further | ||||
** processing by the caller. | ** processing by the caller. | ||||
*/ | */ | ||||
int | int | ||||
ixlv_get_vf_config(struct ixlv_sc *sc) | ixlv_get_vf_config(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_hw *hw = &sc->hw; | struct i40e_hw *hw = &sc->hw; | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
struct i40e_arq_event_info event; | struct i40e_arq_event_info event; | ||||
u16 len; | u16 len; | ||||
i40e_status err = 0; | i40e_status err = 0; | ||||
u32 retries = 0; | u32 retries = 0; | ||||
/* Note this assumes a single VSI */ | /* Note this assumes a single VSI */ | ||||
len = sizeof(struct i40e_virtchnl_vf_resource) + | len = sizeof(struct virtchnl_vf_resource) + | ||||
sizeof(struct i40e_virtchnl_vsi_resource); | sizeof(struct virtchnl_vsi_resource); | ||||
event.buf_len = len; | event.buf_len = len; | ||||
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); | event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); | ||||
if (!event.msg_buf) { | if (!event.msg_buf) { | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto out; | goto out; | ||||
} | } | ||||
for (;;) { | for (;;) { | ||||
err = i40e_clean_arq_element(hw, &event, NULL); | err = i40e_clean_arq_element(hw, &event, NULL); | ||||
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { | if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { | ||||
if (++retries <= IXLV_AQ_MAX_ERR) | if (++retries <= IXLV_AQ_MAX_ERR) | ||||
i40e_msec_pause(10); | i40e_msec_pause(10); | ||||
} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) != | } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != | ||||
I40E_VIRTCHNL_OP_GET_VF_RESOURCES) { | VIRTCHNL_OP_GET_VF_RESOURCES) { | ||||
DDPRINTF(dev, "Received a response from PF," | DDPRINTF(dev, "Received a response from PF," | ||||
" opcode %d, error %d", | " opcode %d, error %d", | ||||
le32toh(event.desc.cookie_high), | le32toh(event.desc.cookie_high), | ||||
le32toh(event.desc.cookie_low)); | le32toh(event.desc.cookie_low)); | ||||
retries++; | retries++; | ||||
continue; | continue; | ||||
} else { | } else { | ||||
err = (i40e_status)le32toh(event.desc.cookie_low); | err = (i40e_status)le32toh(event.desc.cookie_low); | ||||
Show All 31 Lines | |||||
** | ** | ||||
** Request that the PF set up our queues. | ** Request that the PF set up our queues. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_configure_queues(struct ixlv_sc *sc) | ixlv_configure_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
struct ixl_queue *que = vsi->queues; | + if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx); | ||||
+ struct ixl_tx_queue *tx_que = vsi->tx_queues; | |||||
+ struct ixl_rx_queue *rx_que = vsi->rx_queues; | |||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
int len, pairs; | int len, pairs; | ||||
struct i40e_virtchnl_vsi_queue_config_info *vqci; | struct virtchnl_vsi_queue_config_info *vqci; | ||||
struct i40e_virtchnl_queue_pair_info *vqpi; | struct virtchnl_queue_pair_info *vqpi; | ||||
pairs = vsi->num_queues; | + /* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX | ||||
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + | + * queues of a pair need to be configured */ | ||||
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); | + pairs = max(vsi->num_tx_queues, vsi->num_rx_queues); | ||||
len = sizeof(struct virtchnl_vsi_queue_config_info) + | |||||
(sizeof(struct virtchnl_queue_pair_info) * pairs); | |||||
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | ||||
if (!vqci) { | if (!vqci) { | ||||
device_printf(dev, "%s: unable to allocate memory\n", __func__); | device_printf(dev, "%s: unable to allocate memory\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
vqci->vsi_id = sc->vsi_res->vsi_id; | vqci->vsi_id = sc->vsi_res->vsi_id; | ||||
vqci->num_queue_pairs = pairs; | vqci->num_queue_pairs = pairs; | ||||
vqpi = vqci->qpair; | vqpi = vqci->qpair; | ||||
/* Size check is not needed here - HW max is 16 queue pairs, and we | /* Size check is not needed here - HW max is 16 queue pairs, and we | ||||
* can fit info for 31 of them into the AQ buffer before it overflows. | * can fit info for 31 of them into the AQ buffer before it overflows. | ||||
*/ | */ | ||||
for (int i = 0; i < pairs; i++, que++, vqpi++) { | + for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) { | ||||
txr = &que->txr; | + txr = &tx_que->txr; | ||||
rxr = &que->rxr; | + rxr = &rx_que->rxr; | ||||
+ | |||||
vqpi->txq.vsi_id = vqci->vsi_id; | vqpi->txq.vsi_id = vqci->vsi_id; | ||||
vqpi->txq.queue_id = i; | vqpi->txq.queue_id = i; | ||||
vqpi->txq.ring_len = que->num_desc; | + vqpi->txq.ring_len = scctx->isc_ntxd[0]; | ||||
vqpi->txq.dma_ring_addr = txr->dma.pa; | + vqpi->txq.dma_ring_addr = txr->tx_paddr; | ||||
/* Enable Head writeback */ | /* Enable Head writeback */ | ||||
vqpi->txq.headwb_enabled = 1; | vqpi->txq.headwb_enabled = 0; | ||||
vqpi->txq.dma_headwb_addr = txr->dma.pa + | vqpi->txq.dma_headwb_addr = 0; | ||||
(que->num_desc * sizeof(struct i40e_tx_desc)); | |||||
vqpi->rxq.vsi_id = vqci->vsi_id; | vqpi->rxq.vsi_id = vqci->vsi_id; | ||||
vqpi->rxq.queue_id = i; | vqpi->rxq.queue_id = i; | ||||
vqpi->rxq.ring_len = que->num_desc; | + vqpi->rxq.ring_len = scctx->isc_nrxd[0]; | ||||
vqpi->rxq.dma_ring_addr = rxr->dma.pa; | + vqpi->rxq.dma_ring_addr = rxr->rx_paddr; | ||||
vqpi->rxq.max_pkt_size = vsi->max_frame_size; | + vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size; | ||||
+ // TODO: Get this value from iflib, somehow | |||||
vqpi->rxq.databuffer_size = rxr->mbuf_sz; | vqpi->rxq.databuffer_size = rxr->mbuf_sz; | ||||
vqpi->rxq.splithdr_enabled = 0; | vqpi->rxq.splithdr_enabled = 0; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ||||
(u8 *)vqci, len); | (u8 *)vqci, len); | ||||
free(vqci, M_DEVBUF); | free(vqci, M_DEVBUF); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_enable_queues | ** ixlv_enable_queues | ||||
** | ** | ||||
** Request that the PF enable all of our queues. | ** Request that the PF enable all of our queues. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_enable_queues(struct ixlv_sc *sc) | ixlv_enable_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_queue_select vqs; | struct virtchnl_queue_select vqs; | ||||
vqs.vsi_id = sc->vsi_res->vsi_id; | vqs.vsi_id = sc->vsi_res->vsi_id; | ||||
+ /* XXX: In Linux PF, as long as neither of these is 0, | |||||
+ * every queue in VF VSI is enabled. */ | |||||
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; | vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; | ||||
vqs.rx_queues = vqs.tx_queues; | vqs.rx_queues = vqs.tx_queues; | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES, | ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES, | ||||
(u8 *)&vqs, sizeof(vqs)); | (u8 *)&vqs, sizeof(vqs)); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_disable_queues | ** ixlv_disable_queues | ||||
** | ** | ||||
** Request that the PF disable all of our queues. | ** Request that the PF disable all of our queues. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_disable_queues(struct ixlv_sc *sc) | ixlv_disable_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_queue_select vqs; | struct virtchnl_queue_select vqs; | ||||
vqs.vsi_id = sc->vsi_res->vsi_id; | vqs.vsi_id = sc->vsi_res->vsi_id; | ||||
+ /* XXX: In Linux PF, as long as neither of these is 0, | |||||
+ * every queue in VF VSI is disabled. */ | |||||
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; | vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; | ||||
vqs.rx_queues = vqs.tx_queues; | vqs.rx_queues = vqs.tx_queues; | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES, | ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES, | ||||
(u8 *)&vqs, sizeof(vqs)); | (u8 *)&vqs, sizeof(vqs)); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_map_queues | ** ixlv_map_queues | ||||
** | ** | ||||
** Request that the PF map queues to interrupt vectors. Misc causes, including | ** Request that the PF map queues to interrupt vectors. Misc causes, including | ||||
** admin queue, are always mapped to vector 0. | ** admin queue, are always mapped to vector 0. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_map_queues(struct ixlv_sc *sc) | ixlv_map_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_irq_map_info *vm; | struct virtchnl_irq_map_info *vm; | ||||
int i, q, len; | int i, q, len; | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
struct ixl_queue *que = vsi->queues; | + struct ixl_rx_queue *rx_que = vsi->rx_queues; | ||||
+ if_softc_ctx_t scctx = vsi->shared; | |||||
+ device_t dev = sc->dev; | |||||
+ | |||||
+ // XXX: What happens if we only get 1 MSI-X vector? | |||||
+ MPASS(scctx->isc_vectors > 1); | |||||
/* How many queue vectors, adminq uses one */ | /* How many queue vectors, adminq uses one */ | ||||
q = sc->msix - 1; | + // XXX: How do we know how many interrupt vectors we have? | ||||
+ q = scctx->isc_vectors - 1; | |||||
len = sizeof(struct i40e_virtchnl_irq_map_info) + | len = sizeof(struct virtchnl_irq_map_info) + | ||||
(sc->msix * sizeof(struct i40e_virtchnl_vector_map)); | + (scctx->isc_vectors * sizeof(struct i40e_virtchnl_vector_map)); | ||||
vm = malloc(len, M_DEVBUF, M_NOWAIT); | vm = malloc(len, M_DEVBUF, M_NOWAIT); | ||||
if (!vm) { | if (!vm) { | ||||
printf("%s: unable to allocate memory\n", __func__); | + device_printf(dev, "%s: unable to allocate memory\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
vm->num_vectors = sc->msix; | + vm->num_vectors = scctx->isc_vectors; | ||||
/* Queue vectors first */ | /* Queue vectors first */ | ||||
for (i = 0; i < q; i++, que++) { | + for (i = 0; i < q; i++, rx_que++) { | ||||
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; | vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; | ||||
vm->vecmap[i].vector_id = i + 1; /* first is adminq */ | vm->vecmap[i].vector_id = i + 1; /* first is adminq */ | ||||
vm->vecmap[i].txq_map = (1 << que->me); | // vm->vecmap[i].txq_map = (1 << que->me); | ||||
vm->vecmap[i].rxq_map = (1 << que->me); | vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me); | ||||
vm->vecmap[i].rxitr_idx = 0; | vm->vecmap[i].rxitr_idx = 0; | ||||
vm->vecmap[i].txitr_idx = 1; | vm->vecmap[i].txitr_idx = 1; | ||||
} | } | ||||
/* Misc vector last - this is only for AdminQ messages */ | /* Misc vector last - this is only for AdminQ messages */ | ||||
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; | vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; | ||||
vm->vecmap[i].vector_id = 0; | vm->vecmap[i].vector_id = 0; | ||||
vm->vecmap[i].txq_map = 0; | vm->vecmap[i].txq_map = 0; | ||||
vm->vecmap[i].rxq_map = 0; | vm->vecmap[i].rxq_map = 0; | ||||
vm->vecmap[i].rxitr_idx = 0; | vm->vecmap[i].rxitr_idx = 0; | ||||
vm->vecmap[i].txitr_idx = 0; | vm->vecmap[i].txitr_idx = 0; | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP, | ||||
(u8 *)vm, len); | (u8 *)vm, len); | ||||
free(vm, M_DEVBUF); | free(vm, M_DEVBUF); | ||||
} | } | ||||
/* | /* | ||||
** Scan the Filter List looking for vlans that need | ** Scan the Filter List looking for vlans that need | ||||
** to be added, then create the data to hand to the AQ | ** to be added, then create the data to hand to the AQ | ||||
** for handling. | ** for handling. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_add_vlans(struct ixlv_sc *sc) | ixlv_add_vlans(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_vlan_filter_list *v; | struct virtchnl_vlan_filter_list *v; | ||||
struct ixlv_vlan_filter *f, *ftmp; | struct ixlv_vlan_filter *f, *ftmp; | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
int len, i = 0, cnt = 0; | int len, i = 0, cnt = 0; | ||||
/* Get count of VLAN filters to add */ | /* Get count of VLAN filters to add */ | ||||
SLIST_FOREACH(f, sc->vlan_filters, next) { | SLIST_FOREACH(f, sc->vlan_filters, next) { | ||||
if (f->flags & IXL_FILTER_ADD) | if (f->flags & IXL_FILTER_ADD) | ||||
cnt++; | cnt++; | ||||
} | } | ||||
if (!cnt) { /* no work... */ | if (!cnt) { /* no work... */ | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, | ||||
I40E_SUCCESS); | VIRTCHNL_STATUS_SUCCESS); | ||||
return; | return; | ||||
} | } | ||||
len = sizeof(struct i40e_virtchnl_vlan_filter_list) + | len = sizeof(struct virtchnl_vlan_filter_list) + | ||||
(cnt * sizeof(u16)); | (cnt * sizeof(u16)); | ||||
if (len > IXL_AQ_BUF_SZ) { | if (len > IXL_AQ_BUF_SZ) { | ||||
device_printf(dev, "%s: Exceeded Max AQ Buf size\n", | device_printf(dev, "%s: Exceeded Max AQ Buf size\n", | ||||
__func__); | __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
Show All 15 Lines | SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { | ||||
bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); | bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); | ||||
f->flags = IXL_FILTER_USED; | f->flags = IXL_FILTER_USED; | ||||
i++; | i++; | ||||
} | } | ||||
if (i == cnt) | if (i == cnt) | ||||
break; | break; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); | ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); | ||||
free(v, M_DEVBUF); | free(v, M_DEVBUF); | ||||
/* add stats? */ | /* add stats? */ | ||||
} | } | ||||
/* | /* | ||||
** Scan the Filter Table looking for vlans that need | ** Scan the Filter Table looking for vlans that need | ||||
** to be removed, then create the data to hand to the AQ | ** to be removed, then create the data to hand to the AQ | ||||
** for handling. | ** for handling. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_del_vlans(struct ixlv_sc *sc) | ixlv_del_vlans(struct ixlv_sc *sc) | ||||
{ | { | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
struct i40e_virtchnl_vlan_filter_list *v; | struct virtchnl_vlan_filter_list *v; | ||||
struct ixlv_vlan_filter *f, *ftmp; | struct ixlv_vlan_filter *f, *ftmp; | ||||
int len, i = 0, cnt = 0; | int len, i = 0, cnt = 0; | ||||
/* Get count of VLAN filters to delete */ | /* Get count of VLAN filters to delete */ | ||||
SLIST_FOREACH(f, sc->vlan_filters, next) { | SLIST_FOREACH(f, sc->vlan_filters, next) { | ||||
if (f->flags & IXL_FILTER_DEL) | if (f->flags & IXL_FILTER_DEL) | ||||
cnt++; | cnt++; | ||||
} | } | ||||
if (!cnt) { /* no work... */ | if (!cnt) { /* no work... */ | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, | ||||
I40E_SUCCESS); | VIRTCHNL_STATUS_SUCCESS); | ||||
return; | return; | ||||
} | } | ||||
len = sizeof(struct i40e_virtchnl_vlan_filter_list) + | len = sizeof(struct virtchnl_vlan_filter_list) + | ||||
(cnt * sizeof(u16)); | (cnt * sizeof(u16)); | ||||
if (len > IXL_AQ_BUF_SZ) { | if (len > IXL_AQ_BUF_SZ) { | ||||
device_printf(dev, "%s: Exceeded Max AQ Buf size\n", | device_printf(dev, "%s: Exceeded Max AQ Buf size\n", | ||||
__func__); | __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
Show All 16 Lines | SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { | ||||
i++; | i++; | ||||
SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next); | SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next); | ||||
free(f, M_DEVBUF); | free(f, M_DEVBUF); | ||||
} | } | ||||
if (i == cnt) | if (i == cnt) | ||||
break; | break; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); | ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); | ||||
free(v, M_DEVBUF); | free(v, M_DEVBUF); | ||||
/* add stats? */ | /* add stats? */ | ||||
} | } | ||||
/* | /* | ||||
** This routine takes additions to the vsi filter | ** This routine takes additions to the vsi filter | ||||
** table and creates an Admin Queue call to create | ** table and creates an Admin Queue call to create | ||||
** the filters in the hardware. | ** the filters in the hardware. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_add_ether_filters(struct ixlv_sc *sc) | ixlv_add_ether_filters(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_ether_addr_list *a; | struct virtchnl_ether_addr_list *a; | ||||
struct ixlv_mac_filter *f; | struct ixlv_mac_filter *f; | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
int len, j = 0, cnt = 0; | int len, j = 0, cnt = 0; | ||||
/* Get count of MAC addresses to add */ | /* Get count of MAC addresses to add */ | ||||
SLIST_FOREACH(f, sc->mac_filters, next) { | SLIST_FOREACH(f, sc->mac_filters, next) { | ||||
if (f->flags & IXL_FILTER_ADD) | if (f->flags & IXL_FILTER_ADD) | ||||
cnt++; | cnt++; | ||||
} | } | ||||
if (cnt == 0) { /* Should not happen... */ | if (cnt == 0) { /* Should not happen... */ | ||||
DDPRINTF(dev, "cnt == 0, exiting..."); | DDPRINTF(dev, "cnt == 0, exiting..."); | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, | ||||
I40E_SUCCESS); | VIRTCHNL_STATUS_SUCCESS); | ||||
return; | return; | ||||
} | } | ||||
len = sizeof(struct i40e_virtchnl_ether_addr_list) + | len = sizeof(struct virtchnl_ether_addr_list) + | ||||
(cnt * sizeof(struct i40e_virtchnl_ether_addr)); | (cnt * sizeof(struct virtchnl_ether_addr)); | ||||
a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | ||||
if (a == NULL) { | if (a == NULL) { | ||||
device_printf(dev, "%s: Failed to get memory for " | device_printf(dev, "%s: Failed to get memory for " | ||||
"virtchnl_ether_addr_list\n", __func__); | "virtchnl_ether_addr_list\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
Show All 11 Lines | if (f->flags & IXL_FILTER_ADD) { | ||||
MAC_FORMAT_ARGS(f->macaddr)); | MAC_FORMAT_ARGS(f->macaddr)); | ||||
} | } | ||||
if (j == cnt) | if (j == cnt) | ||||
break; | break; | ||||
} | } | ||||
DDPRINTF(dev, "len %d, j %d, cnt %d", | DDPRINTF(dev, "len %d, j %d, cnt %d", | ||||
len, j, cnt); | len, j, cnt); | ||||
ixlv_send_pf_msg(sc, | ixlv_send_pf_msg(sc, | ||||
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len); | VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len); | ||||
/* add stats? */ | /* add stats? */ | ||||
free(a, M_DEVBUF); | free(a, M_DEVBUF); | ||||
return; | return; | ||||
} | } | ||||
/* | /* | ||||
** This routine takes filters flagged for deletion in the | ** This routine takes filters flagged for deletion in the | ||||
** sc MAC filter list and creates an Admin Queue call | ** sc MAC filter list and creates an Admin Queue call | ||||
** to delete those filters in the hardware. | ** to delete those filters in the hardware. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_del_ether_filters(struct ixlv_sc *sc) | ixlv_del_ether_filters(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_ether_addr_list *d; | struct virtchnl_ether_addr_list *d; | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
struct ixlv_mac_filter *f, *f_temp; | struct ixlv_mac_filter *f, *f_temp; | ||||
int len, j = 0, cnt = 0; | int len, j = 0, cnt = 0; | ||||
/* Get count of MAC addresses to delete */ | /* Get count of MAC addresses to delete */ | ||||
SLIST_FOREACH(f, sc->mac_filters, next) { | SLIST_FOREACH(f, sc->mac_filters, next) { | ||||
if (f->flags & IXL_FILTER_DEL) | if (f->flags & IXL_FILTER_DEL) | ||||
cnt++; | cnt++; | ||||
} | } | ||||
if (cnt == 0) { | if (cnt == 0) { | ||||
DDPRINTF(dev, "cnt == 0, exiting..."); | DDPRINTF(dev, "cnt == 0, exiting..."); | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, | ||||
I40E_SUCCESS); | VIRTCHNL_STATUS_SUCCESS); | ||||
return; | return; | ||||
} | } | ||||
len = sizeof(struct i40e_virtchnl_ether_addr_list) + | len = sizeof(struct virtchnl_ether_addr_list) + | ||||
(cnt * sizeof(struct i40e_virtchnl_ether_addr)); | (cnt * sizeof(struct virtchnl_ether_addr)); | ||||
d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | ||||
if (d == NULL) { | if (d == NULL) { | ||||
device_printf(dev, "%s: Failed to get memory for " | device_printf(dev, "%s: Failed to get memory for " | ||||
"virtchnl_ether_addr_list\n", __func__); | "virtchnl_ether_addr_list\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
Show All 9 Lines | if (f->flags & IXL_FILTER_DEL) { | ||||
j++; | j++; | ||||
SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next); | SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next); | ||||
free(f, M_DEVBUF); | free(f, M_DEVBUF); | ||||
} | } | ||||
if (j == cnt) | if (j == cnt) | ||||
break; | break; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, | ixlv_send_pf_msg(sc, | ||||
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len); | VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len); | ||||
/* add stats? */ | /* add stats? */ | ||||
free(d, M_DEVBUF); | free(d, M_DEVBUF); | ||||
return; | return; | ||||
} | } | ||||
/* | /* | ||||
** ixlv_request_reset | ** ixlv_request_reset | ||||
** Request that the PF reset this VF. No response is expected. | ** Request that the PF reset this VF. No response is expected. | ||||
*/ | */ | ||||
/*
** ixlv_request_reset
** Request that the PF reset this VF. No response is expected;
** completion is detected by polling I40E_VFGEN_RSTAT elsewhere.
*/
void
ixlv_request_reset(struct ixlv_sc *sc)
{
	/*
	** Set the reset status to "in progress" before
	** the request; this avoids any possibility of
	** a mistaken early detection of completion if the
	** register still holds a stale "completed" value.
	*/
	wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS);
	ixlv_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0);
}
/* | /* | ||||
** ixlv_request_stats | ** ixlv_request_stats | ||||
** Request the statistics for this VF's VSI from PF. | ** Request the statistics for this VF's VSI from PF. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_request_stats(struct ixlv_sc *sc) | ixlv_request_stats(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_queue_select vqs; | struct virtchnl_queue_select vqs; | ||||
int error = 0; | int error = 0; | ||||
vqs.vsi_id = sc->vsi_res->vsi_id; | vqs.vsi_id = sc->vsi_res->vsi_id; | ||||
/* Low priority, we don't need to error check */ | /* Low priority, we don't need to error check */ | ||||
error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS, | error = ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS, | ||||
(u8 *)&vqs, sizeof(vqs)); | (u8 *)&vqs, sizeof(vqs)); | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
if (error) | if (error) | ||||
device_printf(sc->dev, "Error sending stats request to PF: %d\n", error); | device_printf(sc->dev, "Error sending stats request to PF: %d\n", error); | ||||
#endif | #endif | ||||
} | } | ||||
/* | /* | ||||
** Updates driver's stats counters with VSI stats returned from PF. | ** Updates driver's stats counters with VSI stats returned from PF. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) | ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
uint64_t tx_discards; | uint64_t tx_discards; | ||||
tx_discards = es->tx_discards; | tx_discards = es->tx_discards; | ||||
#if 0 | |||||
for (int i = 0; i < vsi->num_queues; i++) | for (int i = 0; i < vsi->num_queues; i++) | ||||
tx_discards += sc->vsi.queues[i].txr.br->br_drops; | tx_discards += sc->vsi.queues[i].txr.br->br_drops; | ||||
#endif | |||||
/* Update ifnet stats */ | /* Update ifnet stats */ | ||||
IXL_SET_IPACKETS(vsi, es->rx_unicast + | IXL_SET_IPACKETS(vsi, es->rx_unicast + | ||||
es->rx_multicast + | es->rx_multicast + | ||||
es->rx_broadcast); | es->rx_broadcast); | ||||
IXL_SET_OPACKETS(vsi, es->tx_unicast + | IXL_SET_OPACKETS(vsi, es->tx_unicast + | ||||
es->tx_multicast + | es->tx_multicast + | ||||
es->tx_broadcast); | es->tx_broadcast); | ||||
Show All 9 Lines | #endif | ||||
IXL_SET_COLLISIONS(vsi, 0); | IXL_SET_COLLISIONS(vsi, 0); | ||||
vsi->eth_stats = *es; | vsi->eth_stats = *es; | ||||
} | } | ||||
void | void | ||||
ixlv_config_rss_key(struct ixlv_sc *sc) | ixlv_config_rss_key(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_rss_key *rss_key_msg; | struct virtchnl_rss_key *rss_key_msg; | ||||
int msg_len, key_length; | int msg_len, key_length; | ||||
u8 rss_seed[IXL_RSS_KEY_SIZE]; | u8 rss_seed[IXL_RSS_KEY_SIZE]; | ||||
#ifdef RSS | #ifdef RSS | ||||
/* Fetch the configured RSS key */ | /* Fetch the configured RSS key */ | ||||
rss_getkey((uint8_t *) &rss_seed); | rss_getkey((uint8_t *) &rss_seed); | ||||
#else | #else | ||||
ixl_get_default_rss_key((u32 *)rss_seed); | ixl_get_default_rss_key((u32 *)rss_seed); | ||||
#endif | #endif | ||||
/* Send the fetched key */ | /* Send the fetched key */ | ||||
key_length = IXL_RSS_KEY_SIZE; | key_length = IXL_RSS_KEY_SIZE; | ||||
msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1; | msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1; | ||||
rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); | rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); | ||||
if (rss_key_msg == NULL) { | if (rss_key_msg == NULL) { | ||||
device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n"); | device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n"); | ||||
return; | return; | ||||
} | } | ||||
rss_key_msg->vsi_id = sc->vsi_res->vsi_id; | rss_key_msg->vsi_id = sc->vsi_res->vsi_id; | ||||
rss_key_msg->key_len = key_length; | rss_key_msg->key_len = key_length; | ||||
bcopy(rss_seed, &rss_key_msg->key[0], key_length); | bcopy(rss_seed, &rss_key_msg->key[0], key_length); | ||||
DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d", | DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d", | ||||
rss_key_msg->vsi_id, rss_key_msg->key_len); | rss_key_msg->vsi_id, rss_key_msg->key_len); | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, | ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY, | ||||
(u8 *)rss_key_msg, msg_len); | (u8 *)rss_key_msg, msg_len); | ||||
free(rss_key_msg, M_DEVBUF); | free(rss_key_msg, M_DEVBUF); | ||||
} | } | ||||
void | void | ||||
ixlv_set_rss_hena(struct ixlv_sc *sc) | ixlv_set_rss_hena(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_rss_hena hena; | struct virtchnl_rss_hena hena; | ||||
+ struct i40e_hw *hw = &sc->hw; | |||||
hena.hena = IXL_DEFAULT_RSS_HENA_X722; | + if (hw->mac.type == I40E_MAC_X722_VF) | ||||
+ hena.hena = IXL_DEFAULT_RSS_HENA_X722; | |||||
+ else | |||||
+ hena.hena = IXL_DEFAULT_RSS_HENA_XL710; | |||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA, | ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA, | ||||
(u8 *)&hena, sizeof(hena)); | (u8 *)&hena, sizeof(hena)); | ||||
} | } | ||||
void | void | ||||
ixlv_config_rss_lut(struct ixlv_sc *sc) | ixlv_config_rss_lut(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_rss_lut *rss_lut_msg; | struct virtchnl_rss_lut *rss_lut_msg; | ||||
int msg_len; | int msg_len; | ||||
u16 lut_length; | u16 lut_length; | ||||
u32 lut; | u32 lut; | ||||
int i, que_id; | int i, que_id; | ||||
lut_length = IXL_RSS_VSI_LUT_SIZE; | lut_length = IXL_RSS_VSI_LUT_SIZE; | ||||
msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1; | msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1; | ||||
rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); | rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); | ||||
if (rss_lut_msg == NULL) { | if (rss_lut_msg == NULL) { | ||||
device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n"); | device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n"); | ||||
return; | return; | ||||
} | } | ||||
rss_lut_msg->vsi_id = sc->vsi_res->vsi_id; | rss_lut_msg->vsi_id = sc->vsi_res->vsi_id; | ||||
/* Each LUT entry is a max of 1 byte, so this is easy */ | /* Each LUT entry is a max of 1 byte, so this is easy */ | ||||
rss_lut_msg->lut_entries = lut_length; | rss_lut_msg->lut_entries = lut_length; | ||||
/* Populate the LUT with max no. of queues in round robin fashion */ | /* Populate the LUT with max no. of queues in round robin fashion */ | ||||
for (i = 0; i < lut_length; i++) { | for (i = 0; i < lut_length; i++) { | ||||
#ifdef RSS | #ifdef RSS | ||||
/* | /* | ||||
* Fetch the RSS bucket id for the given indirection entry. | * Fetch the RSS bucket id for the given indirection entry. | ||||
* Cap it at the number of configured buckets (which is | * Cap it at the number of configured buckets (which is | ||||
* num_queues.) | * num_queues.) | ||||
*/ | */ | ||||
que_id = rss_get_indirection_to_bucket(i); | que_id = rss_get_indirection_to_bucket(i); | ||||
que_id = que_id % sc->vsi.num_queues; | + que_id = que_id % sc->vsi.num_rx_queues; | ||||
#else | #else | ||||
que_id = i % sc->vsi.num_queues; | + que_id = i % sc->vsi.num_rx_queues; | ||||
#endif | #endif | ||||
lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK; | lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK; | ||||
rss_lut_msg->lut[i] = lut; | rss_lut_msg->lut[i] = lut; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, | ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT, | ||||
(u8 *)rss_lut_msg, msg_len); | (u8 *)rss_lut_msg, msg_len); | ||||
free(rss_lut_msg, M_DEVBUF); | free(rss_lut_msg, M_DEVBUF); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_vc_completion | ** ixlv_vc_completion | ||||
** | ** | ||||
** Asynchronous completion function for admin queue messages. Rather than busy | ** Asynchronous completion function for admin queue messages. Rather than busy | ||||
** wait, we fire off our requests and assume that no errors will be returned. | ** wait, we fire off our requests and assume that no errors will be returned. | ||||
** This function handles the reply messages. | ** This function handles the reply messages. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_vc_completion(struct ixlv_sc *sc, | ixlv_vc_completion(struct ixlv_sc *sc, | ||||
enum i40e_virtchnl_ops v_opcode, | enum virtchnl_ops v_opcode, | ||||
i40e_status v_retval, u8 *msg, u16 msglen) | enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) | ||||
{ | { | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
if (v_opcode == I40E_VIRTCHNL_OP_EVENT) { | if (v_opcode == VIRTCHNL_OP_EVENT) { | ||||
struct i40e_virtchnl_pf_event *vpe = | struct virtchnl_pf_event *vpe = | ||||
(struct i40e_virtchnl_pf_event *)msg; | (struct virtchnl_pf_event *)msg; | ||||
switch (vpe->event) { | switch (vpe->event) { | ||||
case I40E_VIRTCHNL_EVENT_LINK_CHANGE: | case VIRTCHNL_EVENT_LINK_CHANGE: | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
device_printf(dev, "Link change: status %d, speed %d\n", | device_printf(dev, "Link change: status %d, speed %d\n", | ||||
vpe->event_data.link_event.link_status, | vpe->event_data.link_event.link_status, | ||||
vpe->event_data.link_event.link_speed); | vpe->event_data.link_event.link_speed); | ||||
#endif | #endif | ||||
sc->link_up = | sc->link_up = | ||||
vpe->event_data.link_event.link_status; | vpe->event_data.link_event.link_status; | ||||
sc->link_speed = | sc->link_speed = | ||||
vpe->event_data.link_event.link_speed; | vpe->event_data.link_event.link_speed; | ||||
ixlv_update_link_status(sc); | ixlv_update_link_status(sc); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: | case VIRTCHNL_EVENT_RESET_IMPENDING: | ||||
device_printf(dev, "PF initiated reset!\n"); | device_printf(dev, "PF initiated reset!\n"); | ||||
sc->init_state = IXLV_RESET_PENDING; | sc->init_state = IXLV_RESET_PENDING; | ||||
mtx_unlock(&sc->mtx); | + // mtx_unlock(&sc->mtx); | ||||
ixlv_init(vsi); | + ixlv_if_init(sc->vsi.ctx); | ||||
mtx_lock(&sc->mtx); | + // mtx_lock(&sc->mtx); | ||||
break; | break; | ||||
default: | default: | ||||
device_printf(dev, "%s: Unknown event %d from AQ\n", | device_printf(dev, "%s: Unknown event %d from AQ\n", | ||||
__func__, vpe->event); | __func__, vpe->event); | ||||
break; | break; | ||||
} | } | ||||
return; | return; | ||||
} | } | ||||
/* Catch-all error response */ | /* Catch-all error response */ | ||||
if (v_retval) { | if (v_retval) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"%s: AQ returned error %s to our request %s!\n", | "%s: AQ returned error %s to our request %s!\n", | ||||
__func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode)); | __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode)); | ||||
} | } | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS) | if (v_opcode != VIRTCHNL_OP_GET_STATS) | ||||
DDPRINTF(dev, "opcode %d", v_opcode); | DDPRINTF(dev, "opcode %d", v_opcode); | ||||
#endif | #endif | ||||
switch (v_opcode) { | switch (v_opcode) { | ||||
case I40E_VIRTCHNL_OP_GET_STATS: | case VIRTCHNL_OP_GET_STATS: | ||||
ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg); | ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: | case VIRTCHNL_OP_ADD_ETH_ADDR: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, | ||||
v_retval); | v_retval); | ||||
if (v_retval) { | if (v_retval) { | ||||
device_printf(dev, "WARNING: Error adding VF mac filter!\n"); | device_printf(dev, "WARNING: Error adding VF mac filter!\n"); | ||||
device_printf(dev, "WARNING: Device may not receive traffic!\n"); | device_printf(dev, "WARNING: Device may not receive traffic!\n"); | ||||
} | } | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: | case VIRTCHNL_OP_DEL_ETH_ADDR: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: | case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ADD_VLAN: | case VIRTCHNL_OP_ADD_VLAN: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_DEL_VLAN: | case VIRTCHNL_OP_DEL_VLAN: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ENABLE_QUEUES: | case VIRTCHNL_OP_ENABLE_QUEUES: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES, | ||||
v_retval); | v_retval); | ||||
if (v_retval == 0) { | if (v_retval == 0) { | ||||
/* Update link status */ | /* Update link status */ | ||||
ixlv_update_link_status(sc); | ixlv_update_link_status(sc); | ||||
/* Turn on all interrupts */ | /* Turn on all interrupts */ | ||||
ixlv_enable_intr(vsi); | ixlv_enable_intr(vsi); | ||||
/* And inform the stack we're ready */ | /* And inform the stack we're ready */ | ||||
vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; | // vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; | ||||
/* TODO: Clear a state flag, so we know we're ready to run init again */ | /* TODO: Clear a state flag, so we know we're ready to run init again */ | ||||
} | } | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_DISABLE_QUEUES: | case VIRTCHNL_OP_DISABLE_QUEUES: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES, | ||||
v_retval); | v_retval); | ||||
if (v_retval == 0) { | if (v_retval == 0) { | ||||
/* Turn off all interrupts */ | /* Turn off all interrupts */ | ||||
ixlv_disable_intr(vsi); | ixlv_disable_intr(vsi); | ||||
/* Tell the stack that the interface is no longer active */ | /* Tell the stack that the interface is no longer active */ | ||||
vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); | vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); | ||||
} | } | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: | case VIRTCHNL_OP_CONFIG_VSI_QUEUES: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: | case VIRTCHNL_OP_CONFIG_IRQ_MAP: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: | case VIRTCHNL_OP_CONFIG_RSS_KEY: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_SET_RSS_HENA: | case VIRTCHNL_OP_SET_RSS_HENA: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: | case VIRTCHNL_OP_CONFIG_RSS_LUT: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
default: | default: | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
device_printf(dev, | device_printf(dev, | ||||
"%s: Received unexpected message %s from PF.\n", | "%s: Received unexpected message %s from PF.\n", | ||||
__func__, ixl_vc_opcode_str(v_opcode)); | __func__, ixl_vc_opcode_str(v_opcode)); | ||||
▲ Show 20 Lines • Show All 73 Lines • ▼ Show 20 Lines | ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err) | ||||
cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; | cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; | ||||
cmd->callback(cmd, cmd->arg, err); | cmd->callback(cmd, cmd->arg, err); | ||||
ixl_vc_process_next(mgr); | ixl_vc_process_next(mgr); | ||||
} | } | ||||
static void | static void | ||||
ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request, | ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request, | ||||
enum i40e_status_code err) | enum virtchnl_status_code err) | ||||
{ | { | ||||
struct ixl_vc_cmd *cmd; | struct ixl_vc_cmd *cmd; | ||||
cmd = mgr->current; | cmd = mgr->current; | ||||
if (cmd == NULL || cmd->request != request) | if (cmd == NULL || cmd->request != request) | ||||
return; | return; | ||||
callout_stop(&mgr->callout); | callout_stop(&mgr->callout); | ||||
ixl_vc_process_completion(mgr, err); | /* ATM, the virtchnl codes map to i40e ones directly */ | ||||
ixl_vc_process_completion(mgr, (enum i40e_status_code)err); | |||||
} | } | ||||
static void | static void | ||||
ixl_vc_cmd_timeout(void *arg) | ixl_vc_cmd_timeout(void *arg) | ||||
{ | { | ||||
struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; | struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; | ||||
IXLV_CORE_LOCK_ASSERT(mgr->sc); | |||||
ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT); | ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT); | ||||
} | } | ||||
/* Callout handler: re-send the current command after a deferred retry. */
static void
ixl_vc_cmd_retry(void *arg)
{
	ixl_vc_send_current((struct ixl_vc_mgr *)arg);
}
static void | static void | ||||
ixl_vc_send_current(struct ixl_vc_mgr *mgr) | ixl_vc_send_current(struct ixl_vc_mgr *mgr) | ||||
{ | { | ||||
struct ixl_vc_cmd *cmd; | struct ixl_vc_cmd *cmd; | ||||
Show All 26 Lines | ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr) | ||||
callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); | callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); | ||||
} | } | ||||
/*
 * Queue (or re-queue) a virtchnl command for transmission to the PF.
 *
 * If the command is already busy it is first unlinked — either from the
 * "current" slot or from the pending queue — so it can be safely
 * re-initialized and appended at the tail. The callback is invoked on
 * completion (or failure) with the supplied arg.
 */
void
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
    uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
	/* A busy command lives either in mgr->current or on the pending
	 * list; detach it from whichever before re-inserting below. */
	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
		if (mgr->current == cmd)
			mgr->current = NULL;
		else
			TAILQ_REMOVE(&mgr->pending, cmd, next);
	}

	/* (Re)initialize the command and append it to the pending queue. */
	cmd->request = req;
	cmd->callback = callback;
	cmd->arg = arg;
	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);

	/* Kick the state machine in case nothing is currently in flight. */
	ixl_vc_process_next(mgr);
}
void | void | ||||
ixl_vc_flush(struct ixl_vc_mgr *mgr) | ixl_vc_flush(struct ixl_vc_mgr *mgr) | ||||
{ | { | ||||
struct ixl_vc_cmd *cmd; | struct ixl_vc_cmd *cmd; | ||||
IXLV_CORE_LOCK_ASSERT(mgr->sc); | |||||
KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, | KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, | ||||
("ixlv: pending commands waiting but no command in progress")); | ("ixlv: pending commands waiting but no command in progress")); | ||||
cmd = mgr->current; | cmd = mgr->current; | ||||
if (cmd != NULL) { | if (cmd != NULL) { | ||||
mgr->current = NULL; | mgr->current = NULL; | ||||
cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; | cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; | ||||
cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); | cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); | ||||
Show All 11 Lines |