Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/ixlvc.c
Show First 20 Lines • Show All 211 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
struct i40e_virtchnl_version_info *pf_vvi; | struct i40e_virtchnl_version_info *pf_vvi; | ||||
struct i40e_hw *hw = &sc->hw; | struct i40e_hw *hw = &sc->hw; | ||||
struct i40e_arq_event_info event; | struct i40e_arq_event_info event; | ||||
i40e_status err; | i40e_status err; | ||||
int retries = 0; | int retries = 0; | ||||
event.buf_len = IXL_AQ_BUFSZ; | event.buf_len = IXL_AQ_BUFSZ; | ||||
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); | event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT); | ||||
if (!event.msg_buf) { | if (!event.msg_buf) { | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto out; | goto out; | ||||
} | } | ||||
do { | do { | ||||
if (++retries > IXLV_AQ_MAX_ERR) | if (++retries > IXLV_AQ_MAX_ERR) | ||||
goto out_alloc; | goto out_alloc; | ||||
Show All 23 Lines | ixlv_verify_api_ver(struct ixlv_sc *sc) | ||||
if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || | if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || | ||||
((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && | ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && | ||||
(pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) | (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) | ||||
err = EIO; | err = EIO; | ||||
else | else | ||||
sc->pf_version = pf_vvi->minor; | sc->pf_version = pf_vvi->minor; | ||||
out_alloc: | out_alloc: | ||||
free(event.msg_buf, M_DEVBUF); | free(event.msg_buf, M_IXL); | ||||
out: | out: | ||||
return err; | return err; | ||||
} | } | ||||
/* | /* | ||||
** ixlv_send_vf_config_msg | ** ixlv_send_vf_config_msg | ||||
** | ** | ||||
** Send VF configuration request admin queue message to the PF. The reply | ** Send VF configuration request admin queue message to the PF. The reply | ||||
Show All 35 Lines | ixlv_get_vf_config(struct ixlv_sc *sc) | ||||
u16 len; | u16 len; | ||||
i40e_status err = 0; | i40e_status err = 0; | ||||
u32 retries = 0; | u32 retries = 0; | ||||
/* Note this assumes a single VSI */ | /* Note this assumes a single VSI */ | ||||
len = sizeof(struct i40e_virtchnl_vf_resource) + | len = sizeof(struct i40e_virtchnl_vf_resource) + | ||||
sizeof(struct i40e_virtchnl_vsi_resource); | sizeof(struct i40e_virtchnl_vsi_resource); | ||||
event.buf_len = len; | event.buf_len = len; | ||||
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); | event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT); | ||||
if (!event.msg_buf) { | if (!event.msg_buf) { | ||||
err = ENOMEM; | err = ENOMEM; | ||||
goto out; | goto out; | ||||
} | } | ||||
for (;;) { | for (;;) { | ||||
err = i40e_clean_arq_element(hw, &event, NULL); | err = i40e_clean_arq_element(hw, &event, NULL); | ||||
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { | if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { | ||||
Show All 28 Lines | if (retries > IXLV_AQ_MAX_ERR) { | ||||
goto out_alloc; | goto out_alloc; | ||||
} | } | ||||
} | } | ||||
memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len)); | memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len)); | ||||
i40e_vf_parse_hw_config(hw, sc->vf_res); | i40e_vf_parse_hw_config(hw, sc->vf_res); | ||||
out_alloc: | out_alloc: | ||||
free(event.msg_buf, M_DEVBUF); | free(event.msg_buf, M_IXL); | ||||
out: | out: | ||||
return err; | return err; | ||||
} | } | ||||
/* | /* | ||||
** ixlv_configure_queues | ** ixlv_configure_queues | ||||
** | ** | ||||
** Request that the PF set up our queues. | ** Request that the PF set up our queues. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_configure_queues(struct ixlv_sc *sc) | ixlv_configure_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
if_shared_ctx_t sctx; | |||||
int len, pairs; | int len, pairs; | ||||
struct i40e_virtchnl_vsi_queue_config_info *vqci; | struct i40e_virtchnl_vsi_queue_config_info *vqci; | ||||
struct i40e_virtchnl_queue_pair_info *vqpi; | struct i40e_virtchnl_queue_pair_info *vqpi; | ||||
pairs = vsi->num_queues; | pairs = vsi->num_queues; | ||||
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + | len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + | ||||
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); | (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); | ||||
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | vqci = malloc(len, M_IXL, M_NOWAIT | M_ZERO); | ||||
if (!vqci) { | if (!vqci) { | ||||
device_printf(dev, "%s: unable to allocate memory\n", __func__); | device_printf(dev, "%s: unable to allocate memory\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
sctx = iflib_get_sctx(sc->vsi.ctx); | |||||
vqci->vsi_id = sc->vsi_res->vsi_id; | vqci->vsi_id = sc->vsi_res->vsi_id; | ||||
vqci->num_queue_pairs = pairs; | vqci->num_queue_pairs = pairs; | ||||
vqpi = vqci->qpair; | vqpi = vqci->qpair; | ||||
/* Size check is not needed here - HW max is 16 queue pairs, and we | /* Size check is not needed here - HW max is 16 queue pairs, and we | ||||
* can fit info for 31 of them into the AQ buffer before it overflows. | * can fit info for 31 of them into the AQ buffer before it overflows. | ||||
*/ | */ | ||||
for (int i = 0; i < pairs; i++, que++, vqpi++) { | for (int i = 0; i < pairs; i++, que++, vqpi++) { | ||||
txr = &que->txr; | txr = &que->txr; | ||||
rxr = &que->rxr; | rxr = &que->rxr; | ||||
vqpi->txq.vsi_id = vqci->vsi_id; | vqpi->txq.vsi_id = vqci->vsi_id; | ||||
vqpi->txq.queue_id = i; | vqpi->txq.queue_id = i; | ||||
vqpi->txq.ring_len = que->num_desc; | vqpi->txq.ring_len = sctx->isc_ntxd; | ||||
vqpi->txq.dma_ring_addr = txr->dma.pa; | vqpi->txq.dma_ring_addr = txr->tx_paddr; | ||||
/* Enable Head writeback */ | /* Enable Head writeback */ | ||||
vqpi->txq.headwb_enabled = 1; | vqpi->txq.headwb_enabled = 1; | ||||
vqpi->txq.dma_headwb_addr = txr->dma.pa + | vqpi->txq.dma_headwb_addr = txr->tx_paddr + | ||||
(que->num_desc * sizeof(struct i40e_tx_desc)); | (sctx->isc_ntxd * sizeof(struct i40e_tx_desc)); | ||||
vqpi->rxq.vsi_id = vqci->vsi_id; | vqpi->rxq.vsi_id = vqci->vsi_id; | ||||
vqpi->rxq.queue_id = i; | vqpi->rxq.queue_id = i; | ||||
vqpi->rxq.ring_len = que->num_desc; | vqpi->rxq.ring_len = sctx->isc_ntxd; | ||||
vqpi->rxq.dma_ring_addr = rxr->dma.pa; | vqpi->rxq.dma_ring_addr = rxr->rx_paddr; | ||||
vqpi->rxq.max_pkt_size = vsi->max_frame_size; | vqpi->rxq.max_pkt_size = vsi->max_frame_size; | ||||
vqpi->rxq.databuffer_size = rxr->mbuf_sz; | vqpi->rxq.databuffer_size = rxr->mbuf_sz; | ||||
vqpi->rxq.splithdr_enabled = 0; | vqpi->rxq.splithdr_enabled = 0; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ||||
(u8 *)vqci, len); | (u8 *)vqci, len); | ||||
free(vqci, M_DEVBUF); | free(vqci, M_IXL); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_enable_queues | ** ixlv_enable_queues | ||||
** | ** | ||||
** Request that the PF enable all of our queues. | ** Request that the PF enable all of our queues. | ||||
*/ | */ | ||||
void | void | ||||
Show All 39 Lines | ixlv_map_queues(struct ixlv_sc *sc) | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
/* How many queue vectors, adminq uses one */ | /* How many queue vectors, adminq uses one */ | ||||
q = sc->msix - 1; | q = sc->msix - 1; | ||||
len = sizeof(struct i40e_virtchnl_irq_map_info) + | len = sizeof(struct i40e_virtchnl_irq_map_info) + | ||||
(sc->msix * sizeof(struct i40e_virtchnl_vector_map)); | (sc->msix * sizeof(struct i40e_virtchnl_vector_map)); | ||||
vm = malloc(len, M_DEVBUF, M_NOWAIT); | vm = malloc(len, M_IXL, M_NOWAIT); | ||||
if (!vm) { | if (!vm) { | ||||
printf("%s: unable to allocate memory\n", __func__); | printf("%s: unable to allocate memory\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
vm->num_vectors = sc->msix; | vm->num_vectors = sc->msix; | ||||
/* Queue vectors first */ | /* Queue vectors first */ | ||||
Show All 11 Lines | ixlv_map_queues(struct ixlv_sc *sc) | ||||
vm->vecmap[i].vector_id = 0; | vm->vecmap[i].vector_id = 0; | ||||
vm->vecmap[i].txq_map = 0; | vm->vecmap[i].txq_map = 0; | ||||
vm->vecmap[i].rxq_map = 0; | vm->vecmap[i].rxq_map = 0; | ||||
vm->vecmap[i].rxitr_idx = 0; | vm->vecmap[i].rxitr_idx = 0; | ||||
vm->vecmap[i].txitr_idx = 0; | vm->vecmap[i].txitr_idx = 0; | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | ||||
(u8 *)vm, len); | (u8 *)vm, len); | ||||
free(vm, M_DEVBUF); | free(vm, M_IXL); | ||||
} | } | ||||
/* | /* | ||||
** Scan the Filter List looking for vlans that need | ** Scan the Filter List looking for vlans that need | ||||
** to be added, then create the data to hand to the AQ | ** to be added, then create the data to hand to the AQ | ||||
** for handling. | ** for handling. | ||||
*/ | */ | ||||
void | void | ||||
Show All 21 Lines | ixlv_add_vlans(struct ixlv_sc *sc) | ||||
if (len > IXL_AQ_BUF_SZ) { | if (len > IXL_AQ_BUF_SZ) { | ||||
device_printf(dev, "%s: Exceeded Max AQ Buf size\n", | device_printf(dev, "%s: Exceeded Max AQ Buf size\n", | ||||
__func__); | __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
v = malloc(len, M_DEVBUF, M_NOWAIT); | v = malloc(len, M_IXL, M_NOWAIT); | ||||
if (!v) { | if (!v) { | ||||
device_printf(dev, "%s: unable to allocate memory\n", | device_printf(dev, "%s: unable to allocate memory\n", | ||||
__func__); | __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
v->vsi_id = sc->vsi_res->vsi_id; | v->vsi_id = sc->vsi_res->vsi_id; | ||||
Show All 13 Lines | ixlv_add_vlans(struct ixlv_sc *sc) | ||||
if (i == 0) { /* Should not happen... */ | if (i == 0) { /* Should not happen... */ | ||||
device_printf(dev, "%s: i == 0?\n", __func__); | device_printf(dev, "%s: i == 0?\n", __func__); | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, | ||||
I40E_SUCCESS); | I40E_SUCCESS); | ||||
return; | return; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); | ||||
free(v, M_DEVBUF); | free(v, M_IXL); | ||||
/* add stats? */ | /* add stats? */ | ||||
} | } | ||||
/* | /* | ||||
** Scan the Filter Table looking for vlans that need | ** Scan the Filter Table looking for vlans that need | ||||
** to be removed, then create the data to hand to the AQ | ** to be removed, then create the data to hand to the AQ | ||||
** for handling. | ** for handling. | ||||
*/ | */ | ||||
Show All 22 Lines | ixlv_del_vlans(struct ixlv_sc *sc) | ||||
if (len > IXL_AQ_BUF_SZ) { | if (len > IXL_AQ_BUF_SZ) { | ||||
device_printf(dev, "%s: Exceeded Max AQ Buf size\n", | device_printf(dev, "%s: Exceeded Max AQ Buf size\n", | ||||
__func__); | __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | v = malloc(len, M_IXL, M_NOWAIT | M_ZERO); | ||||
if (!v) { | if (!v) { | ||||
device_printf(dev, "%s: unable to allocate memory\n", | device_printf(dev, "%s: unable to allocate memory\n", | ||||
__func__); | __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
v->vsi_id = sc->vsi_res->vsi_id; | v->vsi_id = sc->vsi_res->vsi_id; | ||||
v->num_elements = cnt; | v->num_elements = cnt; | ||||
/* Scan the filter array */ | /* Scan the filter array */ | ||||
SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { | SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { | ||||
if (f->flags & IXL_FILTER_DEL) { | if (f->flags & IXL_FILTER_DEL) { | ||||
bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); | bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); | ||||
i++; | i++; | ||||
SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next); | SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next); | ||||
free(f, M_DEVBUF); | free(f, M_IXL); | ||||
} | } | ||||
if (i == cnt) | if (i == cnt) | ||||
break; | break; | ||||
} | } | ||||
// ERJ: Take this out? | // ERJ: Take this out? | ||||
if (i == 0) { /* Should not happen... */ | if (i == 0) { /* Should not happen... */ | ||||
device_printf(dev, "%s: i == 0?\n", __func__); | device_printf(dev, "%s: i == 0?\n", __func__); | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, | ||||
I40E_SUCCESS); | I40E_SUCCESS); | ||||
return; | return; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); | ||||
free(v, M_DEVBUF); | free(v, M_IXL); | ||||
/* add stats? */ | /* add stats? */ | ||||
} | } | ||||
/* | /* | ||||
** This routine takes additions to the vsi filter | ** This routine takes additions to the vsi filter | ||||
** table and creates an Admin Queue call to create | ** table and creates an Admin Queue call to create | ||||
** the filters in the hardware. | ** the filters in the hardware. | ||||
Show All 16 Lines | if (cnt == 0) { /* Should not happen... */ | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, | ||||
I40E_SUCCESS); | I40E_SUCCESS); | ||||
return; | return; | ||||
} | } | ||||
len = sizeof(struct i40e_virtchnl_ether_addr_list) + | len = sizeof(struct i40e_virtchnl_ether_addr_list) + | ||||
(cnt * sizeof(struct i40e_virtchnl_ether_addr)); | (cnt * sizeof(struct i40e_virtchnl_ether_addr)); | ||||
a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | a = malloc(len, M_IXL, M_NOWAIT | M_ZERO); | ||||
if (a == NULL) { | if (a == NULL) { | ||||
device_printf(dev, "%s: Failed to get memory for " | device_printf(dev, "%s: Failed to get memory for " | ||||
"virtchnl_ether_addr_list\n", __func__); | "virtchnl_ether_addr_list\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
a->vsi_id = sc->vsi.id; | a->vsi_id = sc->vsi.id; | ||||
a->num_elements = cnt; | a->num_elements = cnt; | ||||
Show All 11 Lines | SLIST_FOREACH(f, sc->mac_filters, next) { | ||||
if (j == cnt) | if (j == cnt) | ||||
break; | break; | ||||
} | } | ||||
DDPRINTF(dev, "len %d, j %d, cnt %d", | DDPRINTF(dev, "len %d, j %d, cnt %d", | ||||
len, j, cnt); | len, j, cnt); | ||||
ixlv_send_pf_msg(sc, | ixlv_send_pf_msg(sc, | ||||
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len); | I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len); | ||||
/* add stats? */ | /* add stats? */ | ||||
free(a, M_DEVBUF); | free(a, M_IXL); | ||||
return; | return; | ||||
} | } | ||||
/* | /* | ||||
** This routine takes filters flagged for deletion in the | ** This routine takes filters flagged for deletion in the | ||||
** sc MAC filter list and creates an Admin Queue call | ** sc MAC filter list and creates an Admin Queue call | ||||
** to delete those filters in the hardware. | ** to delete those filters in the hardware. | ||||
*/ | */ | ||||
Show All 15 Lines | if (cnt == 0) { | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, | ||||
I40E_SUCCESS); | I40E_SUCCESS); | ||||
return; | return; | ||||
} | } | ||||
len = sizeof(struct i40e_virtchnl_ether_addr_list) + | len = sizeof(struct i40e_virtchnl_ether_addr_list) + | ||||
(cnt * sizeof(struct i40e_virtchnl_ether_addr)); | (cnt * sizeof(struct i40e_virtchnl_ether_addr)); | ||||
d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | d = malloc(len, M_IXL, M_NOWAIT | M_ZERO); | ||||
if (d == NULL) { | if (d == NULL) { | ||||
device_printf(dev, "%s: Failed to get memory for " | device_printf(dev, "%s: Failed to get memory for " | ||||
"virtchnl_ether_addr_list\n", __func__); | "virtchnl_ether_addr_list\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
d->vsi_id = sc->vsi.id; | d->vsi_id = sc->vsi.id; | ||||
d->num_elements = cnt; | d->num_elements = cnt; | ||||
/* Scan the filter array */ | /* Scan the filter array */ | ||||
SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) { | SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) { | ||||
if (f->flags & IXL_FILTER_DEL) { | if (f->flags & IXL_FILTER_DEL) { | ||||
bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN); | bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN); | ||||
DDPRINTF(dev, "DEL: " MAC_FORMAT, | DDPRINTF(dev, "DEL: " MAC_FORMAT, | ||||
MAC_FORMAT_ARGS(f->macaddr)); | MAC_FORMAT_ARGS(f->macaddr)); | ||||
j++; | j++; | ||||
SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next); | SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next); | ||||
free(f, M_DEVBUF); | free(f, M_IXL); | ||||
} | } | ||||
if (j == cnt) | if (j == cnt) | ||||
break; | break; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, | ixlv_send_pf_msg(sc, | ||||
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len); | I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len); | ||||
/* add stats? */ | /* add stats? */ | ||||
free(d, M_DEVBUF); | free(d, M_IXL); | ||||
return; | return; | ||||
} | } | ||||
/* | /* | ||||
** ixlv_request_reset | ** ixlv_request_reset | ||||
** Request that the PF reset this VF. No response is expected. | ** Request that the PF reset this VF. No response is expected. | ||||
*/ | */ | ||||
void | void | ||||
Show All 33 Lines | |||||
*/ | */ | ||||
void | void | ||||
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) | ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
uint64_t tx_discards; | uint64_t tx_discards; | ||||
tx_discards = es->tx_discards; | tx_discards = es->tx_discards; | ||||
#ifdef notyet | |||||
for (int i = 0; i < vsi->num_queues; i++) | for (int i = 0; i < vsi->num_queues; i++) | ||||
tx_discards += sc->vsi.queues[i].txr.br->br_drops; | tx_discards += sc->vsi.queues[i].txr.br->br_drops; | ||||
#endif | |||||
/* Update ifnet stats */ | /* Update ifnet stats */ | ||||
IXL_SET_IPACKETS(vsi, es->rx_unicast + | IXL_SET_IPACKETS(vsi, es->rx_unicast + | ||||
es->rx_multicast + | es->rx_multicast + | ||||
es->rx_broadcast); | es->rx_broadcast); | ||||
IXL_SET_OPACKETS(vsi, es->tx_unicast + | IXL_SET_OPACKETS(vsi, es->tx_unicast + | ||||
es->tx_multicast + | es->tx_multicast + | ||||
es->tx_broadcast); | es->tx_broadcast); | ||||
IXL_SET_IBYTES(vsi, es->rx_bytes); | IXL_SET_IBYTES(vsi, es->rx_bytes); | ||||
▲ Show 20 Lines • Show All 176 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
void | void | ||||
ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr) | ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr) | ||||
{ | { | ||||
mgr->sc = sc; | mgr->sc = sc; | ||||
mgr->current = NULL; | mgr->current = NULL; | ||||
TAILQ_INIT(&mgr->pending); | TAILQ_INIT(&mgr->pending); | ||||
callout_init_mtx(&mgr->callout, &sc->mtx, 0); | |||||
} | } | ||||
static void | static void | ||||
ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err) | ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err) | ||||
{ | { | ||||
struct ixl_vc_cmd *cmd; | struct ixl_vc_cmd *cmd; | ||||
cmd = mgr->current; | cmd = mgr->current; | ||||
Show All 18 Lines | ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request, | ||||
ixl_vc_process_completion(mgr, err); | ixl_vc_process_completion(mgr, err); | ||||
} | } | ||||
static void | static void | ||||
ixl_vc_cmd_timeout(void *arg) | ixl_vc_cmd_timeout(void *arg) | ||||
{ | { | ||||
struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; | struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; | ||||
IXLV_CORE_LOCK_ASSERT(mgr->sc); | |||||
ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT); | ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT); | ||||
} | } | ||||
/*
** Callout handler: the retry timer fired, so re-send the current
** virtchnl command to the PF.
*/
static void
ixl_vc_cmd_retry(void *arg)
{
	ixl_vc_send_current((struct ixl_vc_mgr *)arg);
}
static void | static void | ||||
ixl_vc_send_current(struct ixl_vc_mgr *mgr) | ixl_vc_send_current(struct ixl_vc_mgr *mgr) | ||||
{ | { | ||||
struct ixl_vc_cmd *cmd; | struct ixl_vc_cmd *cmd; | ||||
Show All 26 Lines | ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr) | ||||
callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); | callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); | ||||
} | } | ||||
/*
** ixl_vc_enqueue
**
** Queue a virtchnl command (request code `req`, completion `callback` with
** `arg`) on the manager's pending list and kick processing so it is sent
** as soon as no other command is in flight.  A command that is already
** BUSY is first unlinked from wherever it currently sits (in flight or
** pending) before being re-armed and re-queued at the tail.
**
** NOTE(review): the previous code asserted the core lock here
** (IXLV_CORE_LOCK_ASSERT); verify the iflib conversion provides an
** equivalent serialization guarantee for mgr->current and mgr->pending.
*/
void
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
    uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
		/* Re-queueing a live command: unlink it from its current
		 * position (either in flight or on the pending list). */
		if (mgr->current == cmd)
			mgr->current = NULL;
		else
			TAILQ_REMOVE(&mgr->pending, cmd, next);
	}
	cmd->request = req;
	cmd->callback = callback;
	cmd->arg = arg;
	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);

	ixl_vc_process_next(mgr);
}
void | void | ||||
ixl_vc_flush(struct ixl_vc_mgr *mgr) | ixl_vc_flush(struct ixl_vc_mgr *mgr) | ||||
{ | { | ||||
struct ixl_vc_cmd *cmd; | struct ixl_vc_cmd *cmd; | ||||
IXLV_CORE_LOCK_ASSERT(mgr->sc); | |||||
KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, | KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, | ||||
("ixlv: pending commands waiting but no command in progress")); | ("ixlv: pending commands waiting but no command in progress")); | ||||
cmd = mgr->current; | cmd = mgr->current; | ||||
if (cmd != NULL) { | if (cmd != NULL) { | ||||
mgr->current = NULL; | mgr->current = NULL; | ||||
cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; | cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; | ||||
cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); | cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); | ||||
Show All 11 Lines |