Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/ixlvc.c
/****************************************************************************** | /****************************************************************************** | ||||
Copyright (c) 2013-2015, Intel Corporation | Copyright (c) 2013-2017, Intel Corporation | ||||
All rights reserved. | All rights reserved. | ||||
Redistribution and use in source and binary forms, with or without | Redistribution and use in source and binary forms, with or without | ||||
modification, are permitted provided that the following conditions are met: | modification, are permitted provided that the following conditions are met: | ||||
1. Redistributions of source code must retain the above copyright notice, | 1. Redistributions of source code must retain the above copyright notice, | ||||
this list of conditions and the following disclaimer. | this list of conditions and the following disclaimer. | ||||
▲ Show 20 Lines • Show All 161 Lines • ▼ Show 20 Lines | #ifdef IXL_DEBUG | ||||
val_err = ixl_vc_validate_vf_msg(sc, op, msg, len); | val_err = ixl_vc_validate_vf_msg(sc, op, msg, len); | ||||
if (val_err) | if (val_err) | ||||
device_printf(dev, "Error validating msg to PF for op %d," | device_printf(dev, "Error validating msg to PF for op %d," | ||||
" msglen %d: error %d\n", op, len, val_err); | " msglen %d: error %d\n", op, len, val_err); | ||||
#endif | #endif | ||||
err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL); | err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL); | ||||
if (err) | if (err) | ||||
device_printf(dev, "Unable to send opcode %s to PF, " | device_printf(dev, "Unable to send opcode %d to PF, " | ||||
"status %s, aq error %s\n", | "error %d, aq status %d\n", op, err, hw->aq.asq_last_status); | ||||
ixl_vc_opcode_str(op), | |||||
i40e_stat_str(hw, err), | |||||
i40e_aq_str(hw, hw->aq.asq_last_status)); | |||||
return err; | return err; | ||||
} | } | ||||
/* | /* | ||||
** ixlv_send_api_ver | ** ixlv_send_api_ver | ||||
** | ** | ||||
** Send API version admin queue message to the PF. The reply is not checked | ** Send API version admin queue message to the PF. The reply is not checked | ||||
▲ Show 20 Lines • Show All 187 Lines • ▼ Show 20 Lines | |||||
** | ** | ||||
** Request that the PF set up our queues. | ** Request that the PF set up our queues. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_configure_queues(struct ixlv_sc *sc) | ixlv_configure_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
struct ixl_queue *que = vsi->queues; | if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx); | ||||
struct ixl_tx_queue *tx_que = vsi->tx_queues; | |||||
struct ixl_rx_queue *rx_que = vsi->rx_queues; | |||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
int len, pairs; | int len, pairs; | ||||
struct i40e_virtchnl_vsi_queue_config_info *vqci; | struct i40e_virtchnl_vsi_queue_config_info *vqci; | ||||
struct i40e_virtchnl_queue_pair_info *vqpi; | struct i40e_virtchnl_queue_pair_info *vqpi; | ||||
pairs = vsi->num_queues; | /* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX | ||||
* queues of a pair need to be configured */ | |||||
pairs = max(vsi->num_tx_queues, vsi->num_rx_queues); | |||||
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + | len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + | ||||
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); | (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); | ||||
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); | ||||
if (!vqci) { | if (!vqci) { | ||||
device_printf(dev, "%s: unable to allocate memory\n", __func__); | device_printf(dev, "%s: unable to allocate memory\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
vqci->vsi_id = sc->vsi_res->vsi_id; | vqci->vsi_id = sc->vsi_res->vsi_id; | ||||
vqci->num_queue_pairs = pairs; | vqci->num_queue_pairs = pairs; | ||||
vqpi = vqci->qpair; | vqpi = vqci->qpair; | ||||
/* Size check is not needed here - HW max is 16 queue pairs, and we | /* Size check is not needed here - HW max is 16 queue pairs, and we | ||||
* can fit info for 31 of them into the AQ buffer before it overflows. | * can fit info for 31 of them into the AQ buffer before it overflows. | ||||
*/ | */ | ||||
for (int i = 0; i < pairs; i++, que++, vqpi++) { | for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) { | ||||
txr = &que->txr; | txr = &tx_que->txr; | ||||
rxr = &que->rxr; | rxr = &rx_que->rxr; | ||||
vqpi->txq.vsi_id = vqci->vsi_id; | vqpi->txq.vsi_id = vqci->vsi_id; | ||||
vqpi->txq.queue_id = i; | vqpi->txq.queue_id = i; | ||||
vqpi->txq.ring_len = que->num_desc; | vqpi->txq.ring_len = scctx->isc_ntxd[0]; | ||||
vqpi->txq.dma_ring_addr = txr->dma.pa; | vqpi->txq.dma_ring_addr = txr->tx_paddr; | ||||
/* Enable Head writeback */ | /* Enable Head writeback */ | ||||
vqpi->txq.headwb_enabled = 1; | vqpi->txq.headwb_enabled = 1; | ||||
vqpi->txq.dma_headwb_addr = txr->dma.pa + | vqpi->txq.dma_headwb_addr = txr->tx_paddr + | ||||
(que->num_desc * sizeof(struct i40e_tx_desc)); | (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc)); | ||||
vqpi->rxq.vsi_id = vqci->vsi_id; | vqpi->rxq.vsi_id = vqci->vsi_id; | ||||
vqpi->rxq.queue_id = i; | vqpi->rxq.queue_id = i; | ||||
vqpi->rxq.ring_len = que->num_desc; | vqpi->rxq.ring_len = scctx->isc_nrxd[0]; | ||||
vqpi->rxq.dma_ring_addr = rxr->dma.pa; | vqpi->rxq.dma_ring_addr = rxr->rx_paddr; | ||||
vqpi->rxq.max_pkt_size = vsi->max_frame_size; | vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size; | ||||
// TODO: Get this value from iflib, somehow | |||||
vqpi->rxq.databuffer_size = rxr->mbuf_sz; | vqpi->rxq.databuffer_size = rxr->mbuf_sz; | ||||
vqpi->rxq.splithdr_enabled = 0; | vqpi->rxq.splithdr_enabled = 0; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ||||
(u8 *)vqci, len); | (u8 *)vqci, len); | ||||
free(vqci, M_DEVBUF); | free(vqci, M_DEVBUF); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_enable_queues | ** ixlv_enable_queues | ||||
** | ** | ||||
** Request that the PF enable all of our queues. | ** Request that the PF enable all of our queues. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_enable_queues(struct ixlv_sc *sc) | ixlv_enable_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_queue_select vqs; | struct i40e_virtchnl_queue_select vqs; | ||||
vqs.vsi_id = sc->vsi_res->vsi_id; | vqs.vsi_id = sc->vsi_res->vsi_id; | ||||
/* XXX: In Linux PF, as long as neither of these is 0, | |||||
* every queue in VF VSI is enabled. */ | |||||
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; | vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; | ||||
vqs.rx_queues = vqs.tx_queues; | vqs.rx_queues = vqs.tx_queues; | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES, | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES, | ||||
(u8 *)&vqs, sizeof(vqs)); | (u8 *)&vqs, sizeof(vqs)); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_disable_queues | ** ixlv_disable_queues | ||||
** | ** | ||||
** Request that the PF disable all of our queues. | ** Request that the PF disable all of our queues. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_disable_queues(struct ixlv_sc *sc) | ixlv_disable_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_queue_select vqs; | struct i40e_virtchnl_queue_select vqs; | ||||
vqs.vsi_id = sc->vsi_res->vsi_id; | vqs.vsi_id = sc->vsi_res->vsi_id; | ||||
/* XXX: In Linux PF, as long as neither of these is 0, | |||||
* every queue in VF VSI is disabled. */ | |||||
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; | vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; | ||||
vqs.rx_queues = vqs.tx_queues; | vqs.rx_queues = vqs.tx_queues; | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES, | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES, | ||||
(u8 *)&vqs, sizeof(vqs)); | (u8 *)&vqs, sizeof(vqs)); | ||||
} | } | ||||
/* | /* | ||||
** ixlv_map_queues | ** ixlv_map_queues | ||||
** | ** | ||||
** Request that the PF map queues to interrupt vectors. Misc causes, including | ** Request that the PF map queues to interrupt vectors. Misc causes, including | ||||
** admin queue, are always mapped to vector 0. | ** admin queue, are always mapped to vector 0. | ||||
** | |||||
** XXX: In iflib, only RX queues are mapped to HW interrupts | |||||
*/ | */ | ||||
void | void | ||||
ixlv_map_queues(struct ixlv_sc *sc) | ixlv_map_queues(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_irq_map_info *vm; | struct i40e_virtchnl_irq_map_info *vm; | ||||
int i, q, len; | int i, q, len; | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_rx_queue *rx_que = vsi->rx_queues; | ||||
if_softc_ctx_t scctx = vsi->shared; | |||||
device_t dev = sc->dev; | |||||
// XXX: What happens if we only get 1 MSI-X vector? | |||||
MPASS(scctx->isc_vectors > 1); | |||||
/* How many queue vectors, adminq uses one */ | /* How many queue vectors, adminq uses one */ | ||||
q = sc->msix - 1; | // XXX: How do we know how many interrupt vectors we have? | ||||
q = scctx->isc_vectors - 1; | |||||
len = sizeof(struct i40e_virtchnl_irq_map_info) + | len = sizeof(struct i40e_virtchnl_irq_map_info) + | ||||
(sc->msix * sizeof(struct i40e_virtchnl_vector_map)); | (scctx->isc_vectors * sizeof(struct i40e_virtchnl_vector_map)); | ||||
vm = malloc(len, M_DEVBUF, M_NOWAIT); | vm = malloc(len, M_DEVBUF, M_NOWAIT); | ||||
if (!vm) { | if (!vm) { | ||||
printf("%s: unable to allocate memory\n", __func__); | device_printf(dev, "%s: unable to allocate memory\n", __func__); | ||||
ixl_vc_schedule_retry(&sc->vc_mgr); | ixl_vc_schedule_retry(&sc->vc_mgr); | ||||
return; | return; | ||||
} | } | ||||
vm->num_vectors = sc->msix; | vm->num_vectors = scctx->isc_vectors; | ||||
/* Queue vectors first */ | /* Queue vectors first */ | ||||
for (i = 0; i < q; i++, que++) { | for (i = 0; i < q; i++, rx_que++) { | ||||
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; | vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; | ||||
vm->vecmap[i].vector_id = i + 1; /* first is adminq */ | vm->vecmap[i].vector_id = i + 1; /* first is adminq */ | ||||
vm->vecmap[i].txq_map = (1 << que->me); | vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me); | ||||
vm->vecmap[i].rxq_map = (1 << que->me); | |||||
vm->vecmap[i].rxitr_idx = 0; | vm->vecmap[i].rxitr_idx = 0; | ||||
vm->vecmap[i].txitr_idx = 1; | vm->vecmap[i].txitr_idx = 1; | ||||
} | } | ||||
/* Misc vector last - this is only for AdminQ messages */ | /* Misc vector last - this is only for AdminQ messages */ | ||||
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; | vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; | ||||
vm->vecmap[i].vector_id = 0; | vm->vecmap[i].vector_id = 0; | ||||
vm->vecmap[i].txq_map = 0; | vm->vecmap[i].txq_map = 0; | ||||
▲ Show 20 Lines • Show All 291 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
void | void | ||||
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) | ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
uint64_t tx_discards; | uint64_t tx_discards; | ||||
tx_discards = es->tx_discards; | tx_discards = es->tx_discards; | ||||
#if 0 | |||||
for (int i = 0; i < vsi->num_queues; i++) | for (int i = 0; i < vsi->num_queues; i++) | ||||
tx_discards += sc->vsi.queues[i].txr.br->br_drops; | tx_discards += sc->vsi.queues[i].txr.br->br_drops; | ||||
#endif | |||||
/* Update ifnet stats */ | /* Update ifnet stats */ | ||||
IXL_SET_IPACKETS(vsi, es->rx_unicast + | IXL_SET_IPACKETS(vsi, es->rx_unicast + | ||||
es->rx_multicast + | es->rx_multicast + | ||||
es->rx_broadcast); | es->rx_broadcast); | ||||
IXL_SET_OPACKETS(vsi, es->tx_unicast + | IXL_SET_OPACKETS(vsi, es->tx_unicast + | ||||
es->tx_multicast + | es->tx_multicast + | ||||
es->tx_broadcast); | es->tx_broadcast); | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | #endif | ||||
free(rss_key_msg, M_DEVBUF); | free(rss_key_msg, M_DEVBUF); | ||||
} | } | ||||
void | void | ||||
ixlv_set_rss_hena(struct ixlv_sc *sc) | ixlv_set_rss_hena(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_virtchnl_rss_hena hena; | struct i40e_virtchnl_rss_hena hena; | ||||
struct i40e_hw *hw = &sc->hw; | |||||
if (hw->mac.type == I40E_MAC_X722_VF) | |||||
hena.hena = IXL_DEFAULT_RSS_HENA_X722; | hena.hena = IXL_DEFAULT_RSS_HENA_X722; | ||||
else | |||||
hena.hena = IXL_DEFAULT_RSS_HENA_XL710; | |||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA, | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA, | ||||
(u8 *)&hena, sizeof(hena)); | (u8 *)&hena, sizeof(hena)); | ||||
} | } | ||||
void | void | ||||
ixlv_config_rss_lut(struct ixlv_sc *sc) | ixlv_config_rss_lut(struct ixlv_sc *sc) | ||||
{ | { | ||||
Show All 19 Lines | ixlv_config_rss_lut(struct ixlv_sc *sc) | ||||
for (i = 0; i < lut_length; i++) { | for (i = 0; i < lut_length; i++) { | ||||
#ifdef RSS | #ifdef RSS | ||||
/* | /* | ||||
* Fetch the RSS bucket id for the given indirection entry. | * Fetch the RSS bucket id for the given indirection entry. | ||||
* Cap it at the number of configured buckets (which is | * Cap it at the number of configured buckets (which is | ||||
* num_queues.) | * num_queues.) | ||||
*/ | */ | ||||
que_id = rss_get_indirection_to_bucket(i); | que_id = rss_get_indirection_to_bucket(i); | ||||
que_id = que_id % sc->vsi.num_queues; | que_id = que_id % sc->vsi.num_rx_queues; | ||||
#else | #else | ||||
que_id = i % sc->vsi.num_queues; | que_id = i % sc->vsi.num_rx_queues; | ||||
#endif | #endif | ||||
lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK; | lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK; | ||||
rss_lut_msg->lut[i] = lut; | rss_lut_msg->lut[i] = lut; | ||||
} | } | ||||
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, | ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, | ||||
(u8 *)rss_lut_msg, msg_len); | (u8 *)rss_lut_msg, msg_len); | ||||
Show All 30 Lines | #endif | ||||
vpe->event_data.link_event.link_status; | vpe->event_data.link_event.link_status; | ||||
sc->link_speed = | sc->link_speed = | ||||
vpe->event_data.link_event.link_speed; | vpe->event_data.link_event.link_speed; | ||||
ixlv_update_link_status(sc); | ixlv_update_link_status(sc); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: | case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: | ||||
device_printf(dev, "PF initiated reset!\n"); | device_printf(dev, "PF initiated reset!\n"); | ||||
sc->init_state = IXLV_RESET_PENDING; | sc->init_state = IXLV_RESET_PENDING; | ||||
mtx_unlock(&sc->mtx); | // mtx_unlock(&sc->mtx); | ||||
ixlv_init(vsi); | ixlv_if_init(sc->vsi.ctx); | ||||
mtx_lock(&sc->mtx); | // mtx_lock(&sc->mtx); | ||||
break; | break; | ||||
default: | default: | ||||
device_printf(dev, "%s: Unknown event %d from AQ\n", | device_printf(dev, "%s: Unknown event %d from AQ\n", | ||||
__func__, vpe->event); | __func__, vpe->event); | ||||
break; | break; | ||||
} | } | ||||
return; | return; | ||||
} | } | ||||
/* Catch-all error response */ | /* Catch-all error response */ | ||||
if (v_retval) { | if (v_retval) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"%s: AQ returned error %s to our request %s!\n", | "%s: AQ returned error %d to our request %d!\n", | ||||
__func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode)); | __func__, v_retval, v_opcode); | ||||
} | } | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS) | if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS) | ||||
DDPRINTF(dev, "opcode %d", v_opcode); | DDPRINTF(dev, "opcode %d", v_opcode); | ||||
#endif | #endif | ||||
switch (v_opcode) { | switch (v_opcode) { | ||||
Show All 28 Lines | case I40E_VIRTCHNL_OP_ENABLE_QUEUES: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES, | ||||
v_retval); | v_retval); | ||||
if (v_retval == 0) { | if (v_retval == 0) { | ||||
/* Update link status */ | /* Update link status */ | ||||
ixlv_update_link_status(sc); | ixlv_update_link_status(sc); | ||||
/* Turn on all interrupts */ | /* Turn on all interrupts */ | ||||
ixlv_enable_intr(vsi); | ixlv_enable_intr(vsi); | ||||
/* And inform the stack we're ready */ | /* And inform the stack we're ready */ | ||||
vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; | // vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; | ||||
/* TODO: Clear a state flag, so we know we're ready to run init again */ | /* TODO: Clear a state flag, so we know we're ready to run init again */ | ||||
} | } | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_DISABLE_QUEUES: | case I40E_VIRTCHNL_OP_DISABLE_QUEUES: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES, | ||||
v_retval); | v_retval); | ||||
if (v_retval == 0) { | if (v_retval == 0) { | ||||
/* Turn off all interrupts */ | /* Turn off all interrupts */ | ||||
Show All 20 Lines | case I40E_VIRTCHNL_OP_SET_RSS_HENA: | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: | case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: | ||||
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT, | ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT, | ||||
v_retval); | v_retval); | ||||
break; | break; | ||||
default: | default: | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
device_printf(dev, | device_printf(dev, | ||||
"%s: Received unexpected message %s from PF.\n", | "%s: Received unexpected message %d from PF.\n", | ||||
__func__, ixl_vc_opcode_str(v_opcode)); | __func__, v_opcode); | ||||
#endif | #endif | ||||
break; | break; | ||||
} | } | ||||
return; | return; | ||||
} | } | ||||
static void | static void | ||||
ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request) | ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request) | ||||
▲ Show 20 Lines • Show All 82 Lines • ▼ Show 20 Lines | ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request, | ||||
ixl_vc_process_completion(mgr, err); | ixl_vc_process_completion(mgr, err); | ||||
} | } | ||||
static void | static void | ||||
ixl_vc_cmd_timeout(void *arg) | ixl_vc_cmd_timeout(void *arg) | ||||
{ | { | ||||
struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; | struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; | ||||
IXLV_CORE_LOCK_ASSERT(mgr->sc); | |||||
ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT); | ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT); | ||||
} | } | ||||
/*
** ixl_vc_cmd_retry
**
** Callout handler: re-send the current virtchnl command after a
** scheduled retry delay (see ixl_vc_schedule_retry()).
*/
static void
ixl_vc_cmd_retry(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	ixl_vc_send_current(mgr);
}
static void | static void | ||||
ixl_vc_send_current(struct ixl_vc_mgr *mgr) | ixl_vc_send_current(struct ixl_vc_mgr *mgr) | ||||
{ | { | ||||
struct ixl_vc_cmd *cmd; | struct ixl_vc_cmd *cmd; | ||||
Show All 26 Lines | ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr) | ||||
callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); | callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); | ||||
} | } | ||||
void | void | ||||
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd, | ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd, | ||||
uint32_t req, ixl_vc_callback_t *callback, void *arg) | uint32_t req, ixl_vc_callback_t *callback, void *arg) | ||||
{ | { | ||||
IXLV_CORE_LOCK_ASSERT(mgr->sc); | // IXLV_CORE_LOCK_ASSERT(mgr->sc); | ||||
if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) { | if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) { | ||||
if (mgr->current == cmd) | if (mgr->current == cmd) | ||||
mgr->current = NULL; | mgr->current = NULL; | ||||
else | else | ||||
TAILQ_REMOVE(&mgr->pending, cmd, next); | TAILQ_REMOVE(&mgr->pending, cmd, next); | ||||
} | } | ||||
cmd->request = req; | cmd->request = req; | ||||
cmd->callback = callback; | cmd->callback = callback; | ||||
cmd->arg = arg; | cmd->arg = arg; | ||||
cmd->flags |= IXLV_VC_CMD_FLAG_BUSY; | cmd->flags |= IXLV_VC_CMD_FLAG_BUSY; | ||||
TAILQ_INSERT_TAIL(&mgr->pending, cmd, next); | TAILQ_INSERT_TAIL(&mgr->pending, cmd, next); | ||||
ixl_vc_process_next(mgr); | ixl_vc_process_next(mgr); | ||||
} | } | ||||
void | void | ||||
ixl_vc_flush(struct ixl_vc_mgr *mgr) | ixl_vc_flush(struct ixl_vc_mgr *mgr) | ||||
{ | { | ||||
struct ixl_vc_cmd *cmd; | struct ixl_vc_cmd *cmd; | ||||
IXLV_CORE_LOCK_ASSERT(mgr->sc); | // IXLV_CORE_LOCK_ASSERT(mgr->sc); | ||||
KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, | KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, | ||||
("ixlv: pending commands waiting but no command in progress")); | ("ixlv: pending commands waiting but no command in progress")); | ||||
cmd = mgr->current; | cmd = mgr->current; | ||||
if (cmd != NULL) { | if (cmd != NULL) { | ||||
mgr->current = NULL; | mgr->current = NULL; | ||||
cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; | cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; | ||||
cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); | cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); | ||||
Show All 11 Lines |