Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/ixl_pf_iov.c
/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.
▲ Show 20 Lines • Show All 43 Lines • ▼ Show 20 Lines | |||||
static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); | static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); | ||||
static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); | static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); | ||||
static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len); | static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len); | ||||
static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op); | static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op); | ||||
static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line); | static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line); | ||||
static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info); | static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info); | ||||
static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info); | static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info); | ||||
static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, | static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, | ||||
enum i40e_queue_type *last_type, uint16_t *last_queue); | enum i40e_queue_type *last_type, uint16_t *last_queue); | ||||
static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector); | static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector); | ||||
static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf); | static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf); | ||||
static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); | ||||
static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues); | static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues); | ||||
static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); | static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); | ||||
void | void | ||||
ixl_initialize_sriov(struct ixl_pf *pf) | ixl_initialize_sriov(struct ixl_pf *pf) | ||||
{ | { | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
nvlist_t *pf_schema, *vf_schema; | nvlist_t *pf_schema, *vf_schema; | ||||
int iov_error; | int iov_error; | ||||
/* SR-IOV is only supported when MSI-X is in use. */ | /* SR-IOV is only supported when MSI-X is in use. */ | ||||
#if 0 | |||||
if (pf->msix <= 1) | if (pf->msix <= 1) | ||||
return; | return; | ||||
#endif | |||||
pf_schema = pci_iov_schema_alloc_node(); | pf_schema = pci_iov_schema_alloc_node(); | ||||
vf_schema = pci_iov_schema_alloc_node(); | vf_schema = pci_iov_schema_alloc_node(); | ||||
pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); | pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); | ||||
pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", | pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", | ||||
IOV_SCHEMA_HASDEFAULT, TRUE); | IOV_SCHEMA_HASDEFAULT, TRUE); | ||||
pci_iov_schema_add_bool(vf_schema, "allow-set-mac", | pci_iov_schema_add_bool(vf_schema, "allow-set-mac", | ||||
IOV_SCHEMA_HASDEFAULT, FALSE); | IOV_SCHEMA_HASDEFAULT, FALSE); | ||||
▲ Show 20 Lines • Show All 65 Lines • ▼ Show 20 Lines | vsi_ctx.info.tc_mapping[0] = htole16( | ||||
(0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | | (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | | ||||
(bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); | (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); | ||||
code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL); | code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL); | ||||
if (code != I40E_SUCCESS) | if (code != I40E_SUCCESS) | ||||
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | ||||
vf->vsi.seid = vsi_ctx.seid; | vf->vsi.seid = vsi_ctx.seid; | ||||
vf->vsi.vsi_num = vsi_ctx.vsi_number; | vf->vsi.vsi_num = vsi_ctx.vsi_number; | ||||
// vf->vsi.first_queue = vf->qtag.qidx[0]; | // TODO: How to deal with num tx queues / num rx queues split? | ||||
vf->vsi.num_queues = vf->qtag.num_active; | // I don't think just assigning this variable is going to work | ||||
vf->vsi.num_rx_queues = vf->qtag.num_active; | |||||
vf->vsi.num_tx_queues = vf->qtag.num_active; | |||||
code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); | code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); | ||||
if (code != I40E_SUCCESS) | if (code != I40E_SUCCESS) | ||||
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | ||||
code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); | code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); | ||||
if (code != I40E_SUCCESS) { | if (code != I40E_SUCCESS) { | ||||
device_printf(dev, "Failed to disable BW limit: %d\n", | device_printf(dev, "Failed to disable BW limit: %d\n", | ||||
Show All 14 Lines | ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf) | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
error = ixl_vf_alloc_vsi(pf, vf); | error = ixl_vf_alloc_vsi(pf, vf); | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
vf->vsi.hw_filters_add = 0; | vf->vsi.hw_filters_add = 0; | ||||
vf->vsi.hw_filters_del = 0; | vf->vsi.hw_filters_del = 0; | ||||
ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY); | // ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY); | ||||
ixl_reconfigure_filters(&vf->vsi); | ixl_reconfigure_filters(&vf->vsi); | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, | ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, | ||||
uint32_t val) | uint32_t val) | ||||
Show All 32 Lines | i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num), | ||||
I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); | I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); | ||||
/* Enable LAN traffic on this VF */ | /* Enable LAN traffic on this VF */ | ||||
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num), | wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num), | ||||
I40E_VPLAN_MAPENA_TXRX_ENA_MASK); | I40E_VPLAN_MAPENA_TXRX_ENA_MASK); | ||||
/* Program index of each VF queue into PF queue space | /* Program index of each VF queue into PF queue space | ||||
* (This is only needed if QTABLE is enabled) */ | * (This is only needed if QTABLE is enabled) */ | ||||
for (i = 0; i < vf->vsi.num_queues; i++) { | for (i = 0; i < vf->vsi.num_tx_queues; i++) { | ||||
qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) << | qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) << | ||||
I40E_VPLAN_QTABLE_QINDEX_SHIFT; | I40E_VPLAN_QTABLE_QINDEX_SHIFT; | ||||
wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable); | wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable); | ||||
} | } | ||||
for (; i < IXL_MAX_VSI_QUEUES; i++) | for (; i < IXL_MAX_VSI_QUEUES; i++) | ||||
wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), | wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), | ||||
I40E_VPLAN_QTABLE_QINDEX_MASK); | I40E_VPLAN_QTABLE_QINDEX_MASK); | ||||
/* Map queues allocated to VF to its VSI; | /* Map queues allocated to VF to its VSI; | ||||
* This mapping matches the VF-wide mapping since the VF | * This mapping matches the VF-wide mapping since the VF | ||||
* is only given a single VSI */ | * is only given a single VSI */ | ||||
for (i = 0; i < vf->vsi.num_queues; i++) | for (i = 0; i < vf->vsi.num_tx_queues; i++) | ||||
ixl_vf_map_vsi_queue(hw, vf, i, | ixl_vf_map_vsi_queue(hw, vf, i, | ||||
ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i)); | ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i)); | ||||
/* Set rest of VSI queues as unused. */ | /* Set rest of VSI queues as unused. */ | ||||
for (; i < IXL_MAX_VSI_QUEUES; i++) | for (; i < IXL_MAX_VSI_QUEUES; i++) | ||||
ixl_vf_map_vsi_queue(hw, vf, i, | ixl_vf_map_vsi_queue(hw, vf, i, | ||||
I40E_VSILAN_QTABLE_QINDEX_0_MASK); | I40E_VSILAN_QTABLE_QINDEX_0_MASK); | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf) | ||||
/* Index 0 has a special register. */ | /* Index 0 has a special register. */ | ||||
ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num)); | ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num)); | ||||
for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { | for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { | ||||
vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num); | vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num); | ||||
ixl_vf_unregister_intr(hw, vpint_reg); | ixl_vf_unregister_intr(hw, vpint_reg); | ||||
} | } | ||||
vf->vsi.num_queues = 0; | vf->vsi.num_tx_queues = 0; | ||||
vf->vsi.num_rx_queues = 0; | |||||
} | } | ||||
static int | static int | ||||
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf) | ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf) | ||||
{ | { | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
int i; | int i; | ||||
uint16_t global_vf_num; | uint16_t global_vf_num; | ||||
▲ Show 20 Lines • Show All 51 Lines • ▼ Show 20 Lines | for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { | ||||
vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); | vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); | ||||
if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) | if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) | ||||
break; | break; | ||||
} | } | ||||
if (i == IXL_VF_RESET_TIMEOUT) | if (i == IXL_VF_RESET_TIMEOUT) | ||||
device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num); | device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num); | ||||
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED); | wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED); | ||||
vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); | vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); | ||||
vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; | vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; | ||||
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); | wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); | ||||
if (vf->vsi.seid != 0) | if (vf->vsi.seid != 0) | ||||
ixl_disable_rings(&vf->vsi); | ixl_disable_rings(&vf->vsi); | ||||
ixl_vf_release_resources(pf, vf); | ixl_vf_release_resources(pf, vf); | ||||
ixl_vf_setup_vsi(pf, vf); | ixl_vf_setup_vsi(pf, vf); | ||||
ixl_vf_map_queues(pf, vf); | ixl_vf_map_queues(pf, vf); | ||||
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE); | wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE); | ||||
ixl_flush(hw); | ixl_flush(hw); | ||||
} | } | ||||
static int | static int | ||||
ixl_vc_opcode_level(uint16_t opcode) | ixl_vc_opcode_level(uint16_t opcode) | ||||
{ | { | ||||
switch (opcode) { | switch (opcode) { | ||||
case I40E_VIRTCHNL_OP_GET_STATS: | case VIRTCHNL_OP_GET_STATS: | ||||
return (10); | return (10); | ||||
default: | default: | ||||
return (5); | return (5); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, | ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, | ||||
Show All 30 Lines | I40E_VC_DEBUG(pf, 1, | ||||
status, vf->vf_num, file, line); | status, vf->vf_num, file, line); | ||||
ixl_send_vf_msg(pf, vf, op, status, NULL, 0); | ixl_send_vf_msg(pf, vf, op, status, NULL, 0); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_version_info reply; | struct virtchnl_version_info reply; | ||||
if (msg_size != sizeof(struct i40e_virtchnl_version_info)) { | if (msg_size != sizeof(struct virtchnl_version_info)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor; | vf->version = ((struct virtchnl_version_info *)msg)->minor; | ||||
reply.major = I40E_VIRTCHNL_VERSION_MAJOR; | reply.major = VIRTCHNL_VERSION_MAJOR; | ||||
reply.minor = I40E_VIRTCHNL_VERSION_MINOR; | reply.minor = VIRTCHNL_VERSION_MINOR; | ||||
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply, | ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply, | ||||
sizeof(reply)); | sizeof(reply)); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
if (msg_size != 0) { | if (msg_size != 0) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
ixl_reset_vf(pf, vf); | ixl_reset_vf(pf, vf); | ||||
/* No response to a reset message. */ | /* No response to a reset message. */ | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_vf_resource reply; | struct virtchnl_vf_resource reply; | ||||
if ((vf->version == 0 && msg_size != 0) || | if ((vf->version == 0 && msg_size != 0) || | ||||
(vf->version == 1 && msg_size != 4)) { | (vf->version == 1 && msg_size != 4)) { | ||||
device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size," | device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size," | ||||
" for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR, | " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR, | ||||
vf->version); | vf->version); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
bzero(&reply, sizeof(reply)); | bzero(&reply, sizeof(reply)); | ||||
if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) | if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) | ||||
reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 | | reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 | | ||||
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | | VIRTCHNL_VF_OFFLOAD_RSS_REG | | ||||
I40E_VIRTCHNL_VF_OFFLOAD_VLAN; | VIRTCHNL_VF_OFFLOAD_VLAN; | ||||
else | else | ||||
/* Force VF RSS setup by PF in 1.1+ VFs */ | /* Force VF RSS setup by PF in 1.1+ VFs */ | ||||
reply.vf_offload_flags = *(u32 *)msg & ( | reply.vf_cap_flags = *(u32 *)msg & ( | ||||
I40E_VIRTCHNL_VF_OFFLOAD_L2 | | VIRTCHNL_VF_OFFLOAD_L2 | | ||||
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF | | VIRTCHNL_VF_OFFLOAD_RSS_PF | | ||||
I40E_VIRTCHNL_VF_OFFLOAD_VLAN); | VIRTCHNL_VF_OFFLOAD_VLAN); | ||||
reply.num_vsis = 1; | reply.num_vsis = 1; | ||||
reply.num_queue_pairs = vf->vsi.num_queues; | reply.num_queue_pairs = vf->vsi.num_tx_queues; | ||||
reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf; | reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf; | ||||
reply.rss_key_size = 52; | reply.rss_key_size = 52; | ||||
reply.rss_lut_size = 64; | reply.rss_lut_size = 64; | ||||
reply.vsi_res[0].vsi_id = vf->vsi.vsi_num; | reply.vsi_res[0].vsi_id = vf->vsi.vsi_num; | ||||
reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV; | reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; | ||||
reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues; | reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues; | ||||
memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN); | memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN); | ||||
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, | ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES, | ||||
I40E_SUCCESS, &reply, sizeof(reply)); | I40E_SUCCESS, &reply, sizeof(reply)); | ||||
} | } | ||||
static int | static int | ||||
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, | ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, | ||||
struct i40e_virtchnl_txq_info *info) | struct virtchnl_txq_info *info) | ||||
{ | { | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct i40e_hmc_obj_txq txq; | struct i40e_hmc_obj_txq txq; | ||||
uint16_t global_queue_num, global_vf_num; | uint16_t global_queue_num, global_vf_num; | ||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
uint32_t qtx_ctl; | uint32_t qtx_ctl; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
Show All 28 Lines | ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, | ||||
ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true); | ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, | ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, | ||||
struct i40e_virtchnl_rxq_info *info) | struct virtchnl_rxq_info *info) | ||||
{ | { | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct i40e_hmc_obj_rxq rxq; | struct i40e_hmc_obj_rxq rxq; | ||||
uint16_t global_queue_num; | uint16_t global_queue_num; | ||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id); | global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id); | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_vsi_queue_config_info *info; | struct virtchnl_vsi_queue_config_info *info; | ||||
struct i40e_virtchnl_queue_pair_info *pair; | struct virtchnl_queue_pair_info *pair; | ||||
uint16_t expected_msg_size; | uint16_t expected_msg_size; | ||||
int i; | int i; | ||||
if (msg_size < sizeof(*info)) { | if (msg_size < sizeof(*info)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
info = msg; | info = msg; | ||||
if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) { | if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) { | ||||
device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n", | device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n", | ||||
vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues); | vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair); | expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair); | ||||
if (msg_size != expected_msg_size) { | if (msg_size != expected_msg_size) { | ||||
device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n", | device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n", | ||||
vf->vf_num, msg_size, expected_msg_size); | vf->vf_num, msg_size, expected_msg_size); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (info->vsi_id != vf->vsi.vsi_num) { | if (info->vsi_id != vf->vsi.vsi_num) { | ||||
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", | device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", | ||||
vf->vf_num, info->vsi_id, vf->vsi.vsi_num); | vf->vf_num, info->vsi_id, vf->vsi.vsi_num); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
for (i = 0; i < info->num_queue_pairs; i++) { | for (i = 0; i < info->num_queue_pairs; i++) { | ||||
pair = &info->qpair[i]; | pair = &info->qpair[i]; | ||||
if (pair->txq.vsi_id != vf->vsi.vsi_num || | if (pair->txq.vsi_id != vf->vsi.vsi_num || | ||||
pair->rxq.vsi_id != vf->vsi.vsi_num || | pair->rxq.vsi_id != vf->vsi.vsi_num || | ||||
pair->txq.queue_id != pair->rxq.queue_id || | pair->txq.queue_id != pair->rxq.queue_id || | ||||
pair->txq.queue_id >= vf->vsi.num_queues) { | pair->txq.queue_id >= vf->vsi.num_tx_queues) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); | VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) { | if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); | VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) { | if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); | VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_set_qctl(struct ixl_pf *pf, | ixl_vf_set_qctl(struct ixl_pf *pf, | ||||
const struct i40e_virtchnl_vector_map *vector, | const struct virtchnl_vector_map *vector, | ||||
enum i40e_queue_type cur_type, uint16_t cur_queue, | enum i40e_queue_type cur_type, uint16_t cur_queue, | ||||
enum i40e_queue_type *last_type, uint16_t *last_queue) | enum i40e_queue_type *last_type, uint16_t *last_queue) | ||||
{ | { | ||||
uint32_t offset, qctl; | uint32_t offset, qctl; | ||||
uint16_t itr_indx; | uint16_t itr_indx; | ||||
if (cur_type == I40E_QUEUE_TYPE_RX) { | if (cur_type == I40E_QUEUE_TYPE_RX) { | ||||
offset = I40E_QINT_RQCTL(cur_queue); | offset = I40E_QINT_RQCTL(cur_queue); | ||||
Show All 12 Lines | ixl_vf_set_qctl(struct ixl_pf *pf, | ||||
wr32(&pf->hw, offset, qctl); | wr32(&pf->hw, offset, qctl); | ||||
*last_type = cur_type; | *last_type = cur_type; | ||||
*last_queue = cur_queue; | *last_queue = cur_queue; | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, | ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, | ||||
const struct i40e_virtchnl_vector_map *vector) | const struct virtchnl_vector_map *vector) | ||||
{ | { | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
u_int qindex; | u_int qindex; | ||||
enum i40e_queue_type type, last_type; | enum i40e_queue_type type, last_type; | ||||
uint32_t lnklst_reg; | uint32_t lnklst_reg; | ||||
uint16_t rxq_map, txq_map, cur_queue, last_queue; | uint16_t rxq_map, txq_map, cur_queue, last_queue; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
Show All 40 Lines | ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, | ||||
ixl_flush(hw); | ixl_flush(hw); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_irq_map_info *map; | struct virtchnl_irq_map_info *map; | ||||
struct i40e_virtchnl_vector_map *vector; | struct virtchnl_vector_map *vector; | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
int i, largest_txq, largest_rxq; | int i, largest_txq, largest_rxq; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
if (msg_size < sizeof(*map)) { | if (msg_size < sizeof(*map)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
map = msg; | map = msg; | ||||
if (map->num_vectors == 0) { | if (map->num_vectors == 0) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) { | if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
for (i = 0; i < map->num_vectors; i++) { | for (i = 0; i < map->num_vectors; i++) { | ||||
vector = &map->vecmap[i]; | vector = &map->vecmap[i]; | ||||
if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) || | if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) || | ||||
vector->vsi_id != vf->vsi.vsi_num) { | vector->vsi_id != vf->vsi.vsi_num) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); | VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (vector->rxq_map != 0) { | if (vector->rxq_map != 0) { | ||||
largest_rxq = fls(vector->rxq_map) - 1; | largest_rxq = fls(vector->rxq_map) - 1; | ||||
if (largest_rxq >= vf->vsi.num_queues) { | if (largest_rxq >= vf->vsi.num_rx_queues) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | VIRTCHNL_OP_CONFIG_IRQ_MAP, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
if (vector->txq_map != 0) { | if (vector->txq_map != 0) { | ||||
largest_txq = fls(vector->txq_map) - 1; | largest_txq = fls(vector->txq_map) - 1; | ||||
if (largest_txq >= vf->vsi.num_queues) { | if (largest_txq >= vf->vsi.num_tx_queues) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | VIRTCHNL_OP_CONFIG_IRQ_MAP, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
if (vector->rxitr_idx > IXL_MAX_ITR_IDX || | if (vector->rxitr_idx > IXL_MAX_ITR_IDX || | ||||
vector->txitr_idx > IXL_MAX_ITR_IDX) { | vector->txitr_idx > IXL_MAX_ITR_IDX) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | VIRTCHNL_OP_CONFIG_IRQ_MAP, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
ixl_vf_config_vector(pf, vf, vector); | ixl_vf_config_vector(pf, vf, vector); | ||||
} | } | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_queue_select *select; | struct virtchnl_queue_select *select; | ||||
int error = 0; | int error = 0; | ||||
if (msg_size != sizeof(*select)) { | if (msg_size != sizeof(*select)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
select = msg; | select = msg; | ||||
if (select->vsi_id != vf->vsi.vsi_num || | if (select->vsi_id != vf->vsi.vsi_num || | ||||
select->rx_queues == 0 || select->tx_queues == 0) { | select->rx_queues == 0 || select->tx_queues == 0) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
/* Enable TX rings selected by the VF */ | /* Enable TX rings selected by the VF */ | ||||
for (int i = 0; i < 32; i++) { | for (int i = 0; i < 32; i++) { | ||||
if ((1 << i) & select->tx_queues) { | if ((1 << i) & select->tx_queues) { | ||||
/* Warn if queue is out of VF allocation range */ | /* Warn if queue is out of VF allocation range */ | ||||
if (i >= vf->vsi.num_queues) { | if (i >= vf->vsi.num_tx_queues) { | ||||
device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", | device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", | ||||
vf->vf_num, i); | vf->vf_num, i); | ||||
break; | break; | ||||
} | } | ||||
/* Skip this queue if it hasn't been configured */ | /* Skip this queue if it hasn't been configured */ | ||||
if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) | if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) | ||||
continue; | continue; | ||||
/* Warn if this queue is already marked as enabled */ | /* Warn if this queue is already marked as enabled */ | ||||
if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) | if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) | ||||
device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n", | device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n", | ||||
vf->vf_num, i); | vf->vf_num, i); | ||||
error = ixl_enable_tx_ring(pf, &vf->qtag, i); | error = ixl_enable_tx_ring(pf, &vf->qtag, i); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
else | else | ||||
ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true); | ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true); | ||||
} | } | ||||
} | } | ||||
/* Enable RX rings selected by the VF */ | /* Enable RX rings selected by the VF */ | ||||
for (int i = 0; i < 32; i++) { | for (int i = 0; i < 32; i++) { | ||||
if ((1 << i) & select->rx_queues) { | if ((1 << i) & select->rx_queues) { | ||||
/* Warn if queue is out of VF allocation range */ | /* Warn if queue is out of VF allocation range */ | ||||
if (i >= vf->vsi.num_queues) { | if (i >= vf->vsi.num_rx_queues) { | ||||
device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", | device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", | ||||
vf->vf_num, i); | vf->vf_num, i); | ||||
break; | break; | ||||
} | } | ||||
/* Skip this queue if it hasn't been configured */ | /* Skip this queue if it hasn't been configured */ | ||||
if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) | if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) | ||||
continue; | continue; | ||||
/* Warn if this queue is already marked as enabled */ | /* Warn if this queue is already marked as enabled */ | ||||
if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) | if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) | ||||
device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n", | device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n", | ||||
vf->vf_num, i); | vf->vf_num, i); | ||||
error = ixl_enable_rx_ring(pf, &vf->qtag, i); | error = ixl_enable_rx_ring(pf, &vf->qtag, i); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
else | else | ||||
ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false); | ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false); | ||||
} | } | ||||
} | } | ||||
if (error) { | if (error) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, | ||||
I40E_ERR_TIMEOUT); | I40E_ERR_TIMEOUT); | ||||
return; | return; | ||||
} | } | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, | ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, | ||||
void *msg, uint16_t msg_size) | void *msg, uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_queue_select *select; | struct virtchnl_queue_select *select; | ||||
int error = 0; | int error = 0; | ||||
if (msg_size != sizeof(*select)) { | if (msg_size != sizeof(*select)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
select = msg; | select = msg; | ||||
if (select->vsi_id != vf->vsi.vsi_num || | if (select->vsi_id != vf->vsi.vsi_num || | ||||
select->rx_queues == 0 || select->tx_queues == 0) { | select->rx_queues == 0 || select->tx_queues == 0) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
/* Disable TX rings selected by the VF */ | /* Disable TX rings selected by the VF */ | ||||
for (int i = 0; i < 32; i++) { | for (int i = 0; i < 32; i++) { | ||||
if ((1 << i) & select->tx_queues) { | if ((1 << i) & select->tx_queues) { | ||||
/* Warn if queue is out of VF allocation range */ | /* Warn if queue is out of VF allocation range */ | ||||
if (i >= vf->vsi.num_queues) { | if (i >= vf->vsi.num_tx_queues) { | ||||
device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", | device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", | ||||
vf->vf_num, i); | vf->vf_num, i); | ||||
break; | break; | ||||
} | } | ||||
/* Skip this queue if it hasn't been configured */ | /* Skip this queue if it hasn't been configured */ | ||||
if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) | if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) | ||||
continue; | continue; | ||||
/* Warn if this queue is already marked as disabled */ | /* Warn if this queue is already marked as disabled */ | ||||
Show All 9 Lines | if ((1 << i) & select->tx_queues) { | ||||
ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true); | ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true); | ||||
} | } | ||||
} | } | ||||
/* Enable RX rings selected by the VF */ | /* Enable RX rings selected by the VF */ | ||||
for (int i = 0; i < 32; i++) { | for (int i = 0; i < 32; i++) { | ||||
if ((1 << i) & select->rx_queues) { | if ((1 << i) & select->rx_queues) { | ||||
/* Warn if queue is out of VF allocation range */ | /* Warn if queue is out of VF allocation range */ | ||||
if (i >= vf->vsi.num_queues) { | if (i >= vf->vsi.num_rx_queues) { | ||||
device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", | device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", | ||||
vf->vf_num, i); | vf->vf_num, i); | ||||
break; | break; | ||||
} | } | ||||
/* Skip this queue if it hasn't been configured */ | /* Skip this queue if it hasn't been configured */ | ||||
if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) | if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) | ||||
continue; | continue; | ||||
/* Warn if this queue is already marked as disabled */ | /* Warn if this queue is already marked as disabled */ | ||||
if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) { | if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) { | ||||
device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n", | device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n", | ||||
vf->vf_num, i); | vf->vf_num, i); | ||||
continue; | continue; | ||||
} | } | ||||
error = ixl_disable_rx_ring(pf, &vf->qtag, i); | error = ixl_disable_rx_ring(pf, &vf->qtag, i); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
else | else | ||||
ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false); | ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false); | ||||
} | } | ||||
} | } | ||||
if (error) { | if (error) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, | ||||
I40E_ERR_TIMEOUT); | I40E_ERR_TIMEOUT); | ||||
return; | return; | ||||
} | } | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES); | ||||
} | } | ||||
static bool | static bool | ||||
ixl_zero_mac(const uint8_t *addr) | ixl_zero_mac(const uint8_t *addr) | ||||
{ | { | ||||
uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; | uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; | ||||
return (cmp_etheraddr(addr, zero)); | return (cmp_etheraddr(addr, zero)); | ||||
} | } | ||||
static bool | static bool | ||||
ixl_bcast_mac(const uint8_t *addr) | ixl_bcast_mac(const uint8_t *addr) | ||||
{ | { | ||||
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = | |||||
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | |||||
return (cmp_etheraddr(addr, ixl_bcast_addr)); | return (cmp_etheraddr(addr, ixl_bcast_addr)); | ||||
} | } | ||||
static int | static int | ||||
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr) | ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr) | ||||
{ | { | ||||
Show All 11 Lines | ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr) | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_ether_addr_list *addr_list; | struct virtchnl_ether_addr_list *addr_list; | ||||
struct i40e_virtchnl_ether_addr *addr; | struct virtchnl_ether_addr *addr; | ||||
struct ixl_vsi *vsi; | struct ixl_vsi *vsi; | ||||
int i; | int i; | ||||
size_t expected_size; | size_t expected_size; | ||||
vsi = &vf->vsi; | vsi = &vf->vsi; | ||||
if (msg_size < sizeof(*addr_list)) { | if (msg_size < sizeof(*addr_list)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
addr_list = msg; | addr_list = msg; | ||||
expected_size = sizeof(*addr_list) + | expected_size = sizeof(*addr_list) + | ||||
addr_list->num_elements * sizeof(*addr); | addr_list->num_elements * sizeof(*addr); | ||||
if (addr_list->num_elements == 0 || | if (addr_list->num_elements == 0 || | ||||
addr_list->vsi_id != vsi->vsi_num || | addr_list->vsi_id != vsi->vsi_num || | ||||
msg_size != expected_size) { | msg_size != expected_size) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
for (i = 0; i < addr_list->num_elements; i++) { | for (i = 0; i < addr_list->num_elements; i++) { | ||||
if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) { | if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); | VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
for (i = 0; i < addr_list->num_elements; i++) { | for (i = 0; i < addr_list->num_elements; i++) { | ||||
addr = &addr_list->list[i]; | addr = &addr_list->list[i]; | ||||
ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY); | ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY); | ||||
} | } | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_ether_addr_list *addr_list; | struct virtchnl_ether_addr_list *addr_list; | ||||
struct i40e_virtchnl_ether_addr *addr; | struct virtchnl_ether_addr *addr; | ||||
size_t expected_size; | size_t expected_size; | ||||
int i; | int i; | ||||
if (msg_size < sizeof(*addr_list)) { | if (msg_size < sizeof(*addr_list)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
addr_list = msg; | addr_list = msg; | ||||
expected_size = sizeof(*addr_list) + | expected_size = sizeof(*addr_list) + | ||||
addr_list->num_elements * sizeof(*addr); | addr_list->num_elements * sizeof(*addr); | ||||
if (addr_list->num_elements == 0 || | if (addr_list->num_elements == 0 || | ||||
addr_list->vsi_id != vf->vsi.vsi_num || | addr_list->vsi_id != vf->vsi.vsi_num || | ||||
msg_size != expected_size) { | msg_size != expected_size) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
for (i = 0; i < addr_list->num_elements; i++) { | for (i = 0; i < addr_list->num_elements; i++) { | ||||
addr = &addr_list->list[i]; | addr = &addr_list->list[i]; | ||||
if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) { | if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); | VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
for (i = 0; i < addr_list->num_elements; i++) { | for (i = 0; i < addr_list->num_elements; i++) { | ||||
addr = &addr_list->list[i]; | addr = &addr_list->list[i]; | ||||
ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY); | ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY); | ||||
} | } | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR); | ||||
} | } | ||||
static enum i40e_status_code | static enum i40e_status_code | ||||
ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf) | ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf) | ||||
{ | { | ||||
struct i40e_vsi_context vsi_ctx; | struct i40e_vsi_context vsi_ctx; | ||||
vsi_ctx.seid = vf->vsi.seid; | vsi_ctx.seid = vf->vsi.seid; | ||||
bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); | bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); | ||||
vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID); | vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID); | ||||
vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | | vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | | ||||
I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; | ||||
return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL)); | return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL)); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_vlan_filter_list *filter_list; | struct virtchnl_vlan_filter_list *filter_list; | ||||
enum i40e_status_code code; | enum i40e_status_code code; | ||||
size_t expected_size; | size_t expected_size; | ||||
int i; | int i; | ||||
if (msg_size < sizeof(*filter_list)) { | if (msg_size < sizeof(*filter_list)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
filter_list = msg; | filter_list = msg; | ||||
expected_size = sizeof(*filter_list) + | expected_size = sizeof(*filter_list) + | ||||
filter_list->num_elements * sizeof(uint16_t); | filter_list->num_elements * sizeof(uint16_t); | ||||
if (filter_list->num_elements == 0 || | if (filter_list->num_elements == 0 || | ||||
filter_list->vsi_id != vf->vsi.vsi_num || | filter_list->vsi_id != vf->vsi.vsi_num || | ||||
msg_size != expected_size) { | msg_size != expected_size) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { | if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
for (i = 0; i < filter_list->num_elements; i++) { | for (i = 0; i < filter_list->num_elements; i++) { | ||||
if (filter_list->vlan_id[i] > EVL_VLID_MASK) { | if (filter_list->vlan_id[i] > EVL_VLID_MASK) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
code = ixl_vf_enable_vlan_strip(pf, vf); | code = ixl_vf_enable_vlan_strip(pf, vf); | ||||
if (code != I40E_SUCCESS) { | if (code != I40E_SUCCESS) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
} | } | ||||
for (i = 0; i < filter_list->num_elements; i++) | for (i = 0; i < filter_list->num_elements; i++) | ||||
ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); | ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_vlan_filter_list *filter_list; | struct virtchnl_vlan_filter_list *filter_list; | ||||
int i; | int i; | ||||
size_t expected_size; | size_t expected_size; | ||||
if (msg_size < sizeof(*filter_list)) { | if (msg_size < sizeof(*filter_list)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
filter_list = msg; | filter_list = msg; | ||||
expected_size = sizeof(*filter_list) + | expected_size = sizeof(*filter_list) + | ||||
filter_list->num_elements * sizeof(uint16_t); | filter_list->num_elements * sizeof(uint16_t); | ||||
if (filter_list->num_elements == 0 || | if (filter_list->num_elements == 0 || | ||||
filter_list->vsi_id != vf->vsi.vsi_num || | filter_list->vsi_id != vf->vsi.vsi_num || | ||||
msg_size != expected_size) { | msg_size != expected_size) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
for (i = 0; i < filter_list->num_elements; i++) { | for (i = 0; i < filter_list->num_elements; i++) { | ||||
if (filter_list->vlan_id[i] > EVL_VLID_MASK) { | if (filter_list->vlan_id[i] > EVL_VLID_MASK) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { | if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
for (i = 0; i < filter_list->num_elements; i++) | for (i = 0; i < filter_list->num_elements; i++) | ||||
ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); | ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, | ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, | ||||
void *msg, uint16_t msg_size) | void *msg, uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_promisc_info *info; | struct virtchnl_promisc_info *info; | ||||
enum i40e_status_code code; | enum i40e_status_code code; | ||||
if (msg_size != sizeof(*info)) { | if (msg_size != sizeof(*info)) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); | VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { | if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); | VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
info = msg; | info = msg; | ||||
if (info->vsi_id != vf->vsi.vsi_num) { | if (info->vsi_id != vf->vsi.vsi_num) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); | VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id, | code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id, | ||||
info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE); | info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE); | ||||
if (code != I40E_SUCCESS) { | if (code != I40E_SUCCESS) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); | VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); | ||||
return; | return; | ||||
} | } | ||||
code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id, | code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id, | ||||
info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL); | info->flags & FLAG_VF_MULTICAST_PROMISC, NULL); | ||||
if (code != I40E_SUCCESS) { | if (code != I40E_SUCCESS) { | ||||
i40e_send_vf_nack(pf, vf, | i40e_send_vf_nack(pf, vf, | ||||
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); | VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); | ||||
return; | return; | ||||
} | } | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_virtchnl_queue_select *queue; | struct virtchnl_queue_select *queue; | ||||
if (msg_size != sizeof(*queue)) { | if (msg_size != sizeof(*queue)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
queue = msg; | queue = msg; | ||||
if (queue->vsi_id != vf->vsi.vsi_num) { | if (queue->vsi_id != vf->vsi.vsi_num) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
ixl_update_eth_stats(&vf->vsi); | ixl_update_eth_stats(&vf->vsi); | ||||
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, | ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS, | ||||
I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats)); | I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats)); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct i40e_virtchnl_rss_key *key; | struct virtchnl_rss_key *key; | ||||
struct i40e_aqc_get_set_rss_key_data key_data; | struct i40e_aqc_get_set_rss_key_data key_data; | ||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
if (msg_size < sizeof(*key)) { | if (msg_size < sizeof(*key)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
key = msg; | key = msg; | ||||
if (key->key_len > 52) { | if (key->key_len > 52) { | ||||
device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n", | device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n", | ||||
vf->vf_num, key->key_len, 52); | vf->vf_num, key->key_len, 52); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (key->vsi_id != vf->vsi.vsi_num) { | if (key->vsi_id != vf->vsi.vsi_num) { | ||||
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", | device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", | ||||
vf->vf_num, key->vsi_id, vf->vsi.vsi_num); | vf->vf_num, key->vsi_id, vf->vsi.vsi_num); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
/* Fill out hash using MAC-dependent method */ | /* Fill out hash using MAC-dependent method */ | ||||
if (hw->mac.type == I40E_MAC_X722) { | if (hw->mac.type == I40E_MAC_X722) { | ||||
bzero(&key_data, sizeof(key_data)); | bzero(&key_data, sizeof(key_data)); | ||||
if (key->key_len <= 40) | if (key->key_len <= 40) | ||||
bcopy(key->key, key_data.standard_rss_key, key->key_len); | bcopy(key->key, key_data.standard_rss_key, key->key_len); | ||||
else { | else { | ||||
bcopy(key->key, key_data.standard_rss_key, 40); | bcopy(key->key, key_data.standard_rss_key, 40); | ||||
bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40); | bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40); | ||||
} | } | ||||
status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data); | status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data); | ||||
if (status) { | if (status) { | ||||
device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n", | device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n", | ||||
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, | ||||
I40E_ERR_ADMIN_QUEUE_ERROR); | I40E_ERR_ADMIN_QUEUE_ERROR); | ||||
return; | return; | ||||
} | } | ||||
} else { | } else { | ||||
for (int i = 0; i < (key->key_len / 4); i++) | for (int i = 0; i < (key->key_len / 4); i++) | ||||
i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)key->key)[i]); | i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]); | ||||
} | } | ||||
DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!", | DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!", | ||||
vf->vf_num, key->key[0]); | vf->vf_num, key->key[0]); | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct i40e_virtchnl_rss_lut *lut; | struct virtchnl_rss_lut *lut; | ||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
if (msg_size < sizeof(*lut)) { | if (msg_size < sizeof(*lut)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
lut = msg; | lut = msg; | ||||
if (lut->lut_entries > 64) { | if (lut->lut_entries > 64) { | ||||
device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n", | device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n", | ||||
vf->vf_num, lut->lut_entries, 64); | vf->vf_num, lut->lut_entries, 64); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
if (lut->vsi_id != vf->vsi.vsi_num) { | if (lut->vsi_id != vf->vsi.vsi_num) { | ||||
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", | device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", | ||||
vf->vf_num, lut->vsi_id, vf->vsi.vsi_num); | vf->vf_num, lut->vsi_id, vf->vsi.vsi_num); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
/* Fill out LUT using MAC-dependent method */ | /* Fill out LUT using MAC-dependent method */ | ||||
if (hw->mac.type == I40E_MAC_X722) { | if (hw->mac.type == I40E_MAC_X722) { | ||||
status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries); | status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries); | ||||
if (status) { | if (status) { | ||||
device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n", | device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n", | ||||
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, | ||||
I40E_ERR_ADMIN_QUEUE_ERROR); | I40E_ERR_ADMIN_QUEUE_ERROR); | ||||
return; | return; | ||||
} | } | ||||
} else { | } else { | ||||
for (int i = 0; i < (lut->lut_entries / 4); i++) | for (int i = 0; i < (lut->lut_entries / 4); i++) | ||||
i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)lut->lut)[i]); | i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]); | ||||
} | } | ||||
DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!", | DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!", | ||||
vf->vf_num, lut->lut[0], lut->lut_entries); | vf->vf_num, lut->lut[0], lut->lut_entries); | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT); | ||||
} | } | ||||
static void | static void | ||||
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, | ||||
uint16_t msg_size) | uint16_t msg_size) | ||||
{ | { | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct i40e_virtchnl_rss_hena *hena; | struct virtchnl_rss_hena *hena; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
if (msg_size < sizeof(*hena)) { | if (msg_size < sizeof(*hena)) { | ||||
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA, | i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA, | ||||
I40E_ERR_PARAM); | I40E_ERR_PARAM); | ||||
return; | return; | ||||
} | } | ||||
hena = msg; | hena = msg; | ||||
/* Set HENA */ | /* Set HENA */ | ||||
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)hena->hena); | i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena); | ||||
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)(hena->hena >> 32)); | i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32)); | ||||
DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx", | DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx", | ||||
vf->vf_num, hena->hena); | vf->vf_num, hena->hena); | ||||
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA); | ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA); | ||||
} | } | ||||
static void | |||||
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf) | |||||
{ | |||||
struct virtchnl_pf_event event; | |||||
struct i40e_hw *hw; | |||||
hw = &pf->hw; | |||||
event.event = VIRTCHNL_EVENT_LINK_CHANGE; | |||||
event.severity = PF_EVENT_SEVERITY_INFO; | |||||
event.event_data.link_event.link_status = pf->vsi.link_active; | |||||
event.event_data.link_event.link_speed = | |||||
(enum virtchnl_link_speed)hw->phy.link_info.link_speed; | |||||
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event, | |||||
sizeof(event)); | |||||
} | |||||
void | void | ||||
ixl_broadcast_link_state(struct ixl_pf *pf) | |||||
{ | |||||
int i; | |||||
for (i = 0; i < pf->num_vfs; i++) | |||||
ixl_notify_vf_link_state(pf, &pf->vfs[i]); | |||||
} | |||||
void | |||||
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event) | ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event) | ||||
{ | { | ||||
struct ixl_vf *vf; | struct ixl_vf *vf; | ||||
void *msg; | void *msg; | ||||
uint16_t vf_num, msg_size; | uint16_t vf_num, msg_size; | ||||
uint32_t opcode; | uint32_t opcode; | ||||
vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id; | vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id; | ||||
Show All 14 Lines | I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode), | ||||
(vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ", | (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ", | ||||
vf_num, msg_size); | vf_num, msg_size); | ||||
/* This must be a stray msg from a previously destroyed VF. */ | /* This must be a stray msg from a previously destroyed VF. */ | ||||
if (!(vf->vf_flags & VF_FLAG_ENABLED)) | if (!(vf->vf_flags & VF_FLAG_ENABLED)) | ||||
return; | return; | ||||
switch (opcode) { | switch (opcode) { | ||||
case I40E_VIRTCHNL_OP_VERSION: | case VIRTCHNL_OP_VERSION: | ||||
ixl_vf_version_msg(pf, vf, msg, msg_size); | ixl_vf_version_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_RESET_VF: | case VIRTCHNL_OP_RESET_VF: | ||||
ixl_vf_reset_msg(pf, vf, msg, msg_size); | ixl_vf_reset_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: | case VIRTCHNL_OP_GET_VF_RESOURCES: | ||||
ixl_vf_get_resources_msg(pf, vf, msg, msg_size); | ixl_vf_get_resources_msg(pf, vf, msg, msg_size); | ||||
/* Notify VF of link state after it obtains queues, as this is | |||||
* the last thing it will do as part of initialization | |||||
*/ | |||||
ixl_notify_vf_link_state(pf, vf); | |||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: | case VIRTCHNL_OP_CONFIG_VSI_QUEUES: | ||||
ixl_vf_config_vsi_msg(pf, vf, msg, msg_size); | ixl_vf_config_vsi_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: | case VIRTCHNL_OP_CONFIG_IRQ_MAP: | ||||
ixl_vf_config_irq_msg(pf, vf, msg, msg_size); | ixl_vf_config_irq_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ENABLE_QUEUES: | case VIRTCHNL_OP_ENABLE_QUEUES: | ||||
ixl_vf_enable_queues_msg(pf, vf, msg, msg_size); | ixl_vf_enable_queues_msg(pf, vf, msg, msg_size); | ||||
/* Notify VF of link state after it obtains queues, as this is | |||||
* the last thing it will do as part of initialization | |||||
*/ | |||||
ixl_notify_vf_link_state(pf, vf); | |||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_DISABLE_QUEUES: | case VIRTCHNL_OP_DISABLE_QUEUES: | ||||
ixl_vf_disable_queues_msg(pf, vf, msg, msg_size); | ixl_vf_disable_queues_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: | case VIRTCHNL_OP_ADD_ETH_ADDR: | ||||
ixl_vf_add_mac_msg(pf, vf, msg, msg_size); | ixl_vf_add_mac_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: | case VIRTCHNL_OP_DEL_ETH_ADDR: | ||||
ixl_vf_del_mac_msg(pf, vf, msg, msg_size); | ixl_vf_del_mac_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_ADD_VLAN: | case VIRTCHNL_OP_ADD_VLAN: | ||||
ixl_vf_add_vlan_msg(pf, vf, msg, msg_size); | ixl_vf_add_vlan_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_DEL_VLAN: | case VIRTCHNL_OP_DEL_VLAN: | ||||
ixl_vf_del_vlan_msg(pf, vf, msg, msg_size); | ixl_vf_del_vlan_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: | case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: | ||||
ixl_vf_config_promisc_msg(pf, vf, msg, msg_size); | ixl_vf_config_promisc_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_GET_STATS: | case VIRTCHNL_OP_GET_STATS: | ||||
ixl_vf_get_stats_msg(pf, vf, msg, msg_size); | ixl_vf_get_stats_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: | case VIRTCHNL_OP_CONFIG_RSS_KEY: | ||||
ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size); | ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: | case VIRTCHNL_OP_CONFIG_RSS_LUT: | ||||
ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size); | ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
case I40E_VIRTCHNL_OP_SET_RSS_HENA: | case VIRTCHNL_OP_SET_RSS_HENA: | ||||
ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size); | ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size); | ||||
break; | break; | ||||
/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */ | /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */ | ||||
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: | case VIRTCHNL_OP_CONFIG_TX_QUEUE: | ||||
case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: | case VIRTCHNL_OP_CONFIG_RX_QUEUE: | ||||
default: | default: | ||||
i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED); | i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED); | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */ | /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */ | ||||
void | void | ||||
ixl_handle_vflr(void *arg, int pending) | ixl_handle_vflr(void *arg, int pending) | ||||
{ | { | ||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
struct ixl_vf *vf; | struct ixl_vf *vf; | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
uint16_t global_vf_num; | uint16_t global_vf_num; | ||||
uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0; | uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0; | ||||
int i; | int i; | ||||
pf = arg; | pf = arg; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
IXL_PF_LOCK(pf); | /* TODO: May need to lock this */ | ||||
for (i = 0; i < pf->num_vfs; i++) { | for (i = 0; i < pf->num_vfs; i++) { | ||||
global_vf_num = hw->func_caps.vf_base_id + i; | global_vf_num = hw->func_caps.vf_base_id + i; | ||||
vf = &pf->vfs[i]; | vf = &pf->vfs[i]; | ||||
if (!(vf->vf_flags & VF_FLAG_ENABLED)) | if (!(vf->vf_flags & VF_FLAG_ENABLED)) | ||||
continue; | continue; | ||||
vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num); | vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num); | ||||
vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num); | vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num); | ||||
vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index)); | vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index)); | ||||
if (vflrstat & vflrstat_mask) { | if (vflrstat & vflrstat_mask) { | ||||
wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index), | wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index), | ||||
vflrstat_mask); | vflrstat_mask); | ||||
ixl_reinit_vf(pf, vf); | ixl_reinit_vf(pf, vf); | ||||
} | } | ||||
} | } | ||||
atomic_clear_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ); | |||||
icr0 = rd32(hw, I40E_PFINT_ICR0_ENA); | icr0 = rd32(hw, I40E_PFINT_ICR0_ENA); | ||||
icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK; | icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK; | ||||
wr32(hw, I40E_PFINT_ICR0_ENA, icr0); | wr32(hw, I40E_PFINT_ICR0_ENA, icr0); | ||||
ixl_flush(hw); | ixl_flush(hw); | ||||
IXL_PF_UNLOCK(pf); | // IXL_PF_UNLOCK() | ||||
} | } | ||||
static int | static int | ||||
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) | ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) | ||||
{ | { | ||||
switch (err) { | switch (err) { | ||||
case I40E_AQ_RC_EPERM: | case I40E_AQ_RC_EPERM: | ||||
▲ Show 20 Lines • Show All 53 Lines • ▼ Show 20 Lines | ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) | ||||
struct ixl_vsi *pf_vsi; | struct ixl_vsi *pf_vsi; | ||||
enum i40e_status_code ret; | enum i40e_status_code ret; | ||||
int i, error; | int i, error; | ||||
pf = device_get_softc(dev); | pf = device_get_softc(dev); | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
pf_vsi = &pf->vsi; | pf_vsi = &pf->vsi; | ||||
IXL_PF_LOCK(pf); | //IXL_PF_LOCK(pf); | ||||
pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT | | pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT | | ||||
M_ZERO); | M_ZERO); | ||||
if (pf->vfs == NULL) { | if (pf->vfs == NULL) { | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
for (i = 0; i < num_vfs; i++) | for (i = 0; i < num_vfs; i++) | ||||
sysctl_ctx_init(&pf->vfs[i].ctx); | sysctl_ctx_init(&pf->vfs[i].ctx); | ||||
ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid, | ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid, | ||||
1, FALSE, &pf->veb_seid, FALSE, NULL); | 1, FALSE, &pf->veb_seid, FALSE, NULL); | ||||
if (ret != I40E_SUCCESS) { | if (ret != I40E_SUCCESS) { | ||||
error = ixl_adminq_err_to_errno(hw->aq.asq_last_status); | error = ixl_adminq_err_to_errno(hw->aq.asq_last_status); | ||||
device_printf(dev, "add_veb failed; code=%d error=%d", ret, | device_printf(dev, "add_veb failed; code=%d error=%d", ret, | ||||
error); | error); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
pf->num_vfs = num_vfs; | pf->num_vfs = num_vfs; | ||||
IXL_PF_UNLOCK(pf); | //IXL_PF_UNLOCK(pf); | ||||
return (0); | return (0); | ||||
fail: | fail: | ||||
free(pf->vfs, M_IXL); | free(pf->vfs, M_IXL); | ||||
pf->vfs = NULL; | pf->vfs = NULL; | ||||
IXL_PF_UNLOCK(pf); | //IXL_PF_UNLOCK(pf); | ||||
return (error); | return (error); | ||||
} | } | ||||
void | void | ||||
ixl_iov_uninit(device_t dev) | ixl_iov_uninit(device_t dev) | ||||
{ | { | ||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct ixl_vsi *vsi; | struct ixl_vsi *vsi; | ||||
struct ifnet *ifp; | struct ifnet *ifp; | ||||
struct ixl_vf *vfs; | struct ixl_vf *vfs; | ||||
int i, num_vfs; | int i, num_vfs; | ||||
pf = device_get_softc(dev); | pf = device_get_softc(dev); | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
vsi = &pf->vsi; | vsi = &pf->vsi; | ||||
ifp = vsi->ifp; | ifp = vsi->ifp; | ||||
IXL_PF_LOCK(pf); | //IXL_PF_LOCK(pf); | ||||
for (i = 0; i < pf->num_vfs; i++) { | for (i = 0; i < pf->num_vfs; i++) { | ||||
if (pf->vfs[i].vsi.seid != 0) | if (pf->vfs[i].vsi.seid != 0) | ||||
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL); | i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL); | ||||
ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag); | ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag); | ||||
ixl_free_mac_filters(&pf->vfs[i].vsi); | |||||
DDPRINTF(dev, "VF %d: %d released\n", | DDPRINTF(dev, "VF %d: %d released\n", | ||||
i, pf->vfs[i].qtag.num_allocated); | i, pf->vfs[i].qtag.num_allocated); | ||||
DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr)); | DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr)); | ||||
} | } | ||||
if (pf->veb_seid != 0) { | if (pf->veb_seid != 0) { | ||||
i40e_aq_delete_element(hw, pf->veb_seid, NULL); | i40e_aq_delete_element(hw, pf->veb_seid, NULL); | ||||
pf->veb_seid = 0; | pf->veb_seid = 0; | ||||
} | } | ||||
vfs = pf->vfs; | vfs = pf->vfs; | ||||
num_vfs = pf->num_vfs; | num_vfs = pf->num_vfs; | ||||
pf->vfs = NULL; | pf->vfs = NULL; | ||||
pf->num_vfs = 0; | pf->num_vfs = 0; | ||||
IXL_PF_UNLOCK(pf); | //IXL_PF_UNLOCK(pf); | ||||
/* Do this after the unlock as sysctl_ctx_free might sleep. */ | /* Do this after the unlock as sysctl_ctx_free might sleep. */ | ||||
for (i = 0; i < num_vfs; i++) | for (i = 0; i < num_vfs; i++) | ||||
sysctl_ctx_free(&vfs[i].ctx); | sysctl_ctx_free(&vfs[i].ctx); | ||||
free(vfs, M_IXL); | free(vfs, M_IXL); | ||||
} | } | ||||
static int | static int | ||||
Show All 36 Lines | ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) | ||||
const void *mac; | const void *mac; | ||||
size_t size; | size_t size; | ||||
int error; | int error; | ||||
int vf_num_queues; | int vf_num_queues; | ||||
pf = device_get_softc(dev); | pf = device_get_softc(dev); | ||||
vf = &pf->vfs[vfnum]; | vf = &pf->vfs[vfnum]; | ||||
IXL_PF_LOCK(pf); | //IXL_PF_LOCK(pf); | ||||
vf->vf_num = vfnum; | vf->vf_num = vfnum; | ||||
vf->vsi.back = pf; | vf->vsi.back = pf; | ||||
vf->vf_flags = VF_FLAG_ENABLED; | vf->vf_flags = VF_FLAG_ENABLED; | ||||
SLIST_INIT(&vf->vsi.ftl); | SLIST_INIT(&vf->vsi.ftl); | ||||
/* Reserve queue allocation from PF */ | /* Reserve queue allocation from PF */ | ||||
vf_num_queues = nvlist_get_number(params, "num-queues"); | vf_num_queues = nvlist_get_number(params, "num-queues"); | ||||
Show All 23 Lines | ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) | ||||
if (nvlist_get_bool(params, "allow-promisc")) | if (nvlist_get_bool(params, "allow-promisc")) | ||||
vf->vf_flags |= VF_FLAG_PROMISC_CAP; | vf->vf_flags |= VF_FLAG_PROMISC_CAP; | ||||
vf->vf_flags |= VF_FLAG_VLAN_CAP; | vf->vf_flags |= VF_FLAG_VLAN_CAP; | ||||
ixl_reset_vf(pf, vf); | ixl_reset_vf(pf, vf); | ||||
out: | out: | ||||
IXL_PF_UNLOCK(pf); | //IXL_PF_UNLOCK(pf); | ||||
if (error == 0) { | if (error == 0) { | ||||
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); | snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); | ||||
ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name); | ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name); | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||