Index: head/sys/dev/ixl/i40e_common.c
===================================================================
--- head/sys/dev/ixl/i40e_common.c	(revision 299550)
+++ head/sys/dev/ixl/i40e_common.c	(revision 299551)
@@ -1,5502 +1,5507 @@
/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
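 *
 * Hedged caller sketch (it mirrors the check that i40e_init_shared_code()
 * makes further down in this file; illustration only):
 *
 *	if (i40e_set_mac_type(hw) != I40E_SUCCESS ||
 *	    hw->mac.type != I40E_MAC_XL710)
 *		return I40E_ERR_DEVICE_NOT_SUPPORTED;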
**/ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) { enum i40e_status_code status = I40E_SUCCESS; DEBUGFUNC("i40e_set_mac_type\n"); if (hw->vendor_id == I40E_INTEL_VENDOR_ID) { switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_20G_KR2: case I40E_DEV_ID_20G_KR2_A: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; break; default: hw->mac.type = I40E_MAC_GENERIC; break; } } else { status = I40E_ERR_DEVICE_NOT_SUPPORTED; } DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n", hw->mac.type, status); return status; } /** * i40e_aq_str - convert AQ err code to a string * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) { switch (aq_err) { case I40E_AQ_RC_OK: return "OK"; case I40E_AQ_RC_EPERM: return "I40E_AQ_RC_EPERM"; case I40E_AQ_RC_ENOENT: return "I40E_AQ_RC_ENOENT"; case I40E_AQ_RC_ESRCH: return "I40E_AQ_RC_ESRCH"; case I40E_AQ_RC_EINTR: return "I40E_AQ_RC_EINTR"; case I40E_AQ_RC_EIO: return "I40E_AQ_RC_EIO"; case I40E_AQ_RC_ENXIO: return "I40E_AQ_RC_ENXIO"; case I40E_AQ_RC_E2BIG: return "I40E_AQ_RC_E2BIG"; case I40E_AQ_RC_EAGAIN: return "I40E_AQ_RC_EAGAIN"; case I40E_AQ_RC_ENOMEM: return "I40E_AQ_RC_ENOMEM"; case I40E_AQ_RC_EACCES: return "I40E_AQ_RC_EACCES"; case I40E_AQ_RC_EFAULT: return "I40E_AQ_RC_EFAULT"; case I40E_AQ_RC_EBUSY: return "I40E_AQ_RC_EBUSY"; case I40E_AQ_RC_EEXIST: return "I40E_AQ_RC_EEXIST"; case I40E_AQ_RC_EINVAL: return "I40E_AQ_RC_EINVAL"; case I40E_AQ_RC_ENOTTY: return "I40E_AQ_RC_ENOTTY"; case I40E_AQ_RC_ENOSPC: return "I40E_AQ_RC_ENOSPC"; case I40E_AQ_RC_ENOSYS: return "I40E_AQ_RC_ENOSYS"; case I40E_AQ_RC_ERANGE: return "I40E_AQ_RC_ERANGE"; case I40E_AQ_RC_EFLUSHED: return "I40E_AQ_RC_EFLUSHED"; case I40E_AQ_RC_BAD_ADDR: return "I40E_AQ_RC_BAD_ADDR"; case I40E_AQ_RC_EMODE: return "I40E_AQ_RC_EMODE"; case I40E_AQ_RC_EFBIG: return "I40E_AQ_RC_EFBIG"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); return hw->err_str; } /** * i40e_stat_str - convert status err code to a string * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err) { switch (stat_err) { case I40E_SUCCESS: return "OK"; case I40E_ERR_NVM: return "I40E_ERR_NVM"; case I40E_ERR_NVM_CHECKSUM: return "I40E_ERR_NVM_CHECKSUM"; case I40E_ERR_PHY: return "I40E_ERR_PHY"; case I40E_ERR_CONFIG: return "I40E_ERR_CONFIG"; case I40E_ERR_PARAM: return "I40E_ERR_PARAM"; case I40E_ERR_MAC_TYPE: return "I40E_ERR_MAC_TYPE"; case I40E_ERR_UNKNOWN_PHY: return "I40E_ERR_UNKNOWN_PHY"; case I40E_ERR_LINK_SETUP: return "I40E_ERR_LINK_SETUP"; case I40E_ERR_ADAPTER_STOPPED: return "I40E_ERR_ADAPTER_STOPPED"; case I40E_ERR_INVALID_MAC_ADDR: return "I40E_ERR_INVALID_MAC_ADDR"; case I40E_ERR_DEVICE_NOT_SUPPORTED: return "I40E_ERR_DEVICE_NOT_SUPPORTED"; case I40E_ERR_MASTER_REQUESTS_PENDING: return "I40E_ERR_MASTER_REQUESTS_PENDING"; case I40E_ERR_INVALID_LINK_SETTINGS: return "I40E_ERR_INVALID_LINK_SETTINGS"; case I40E_ERR_AUTONEG_NOT_COMPLETE: return "I40E_ERR_AUTONEG_NOT_COMPLETE"; case I40E_ERR_RESET_FAILED: return "I40E_ERR_RESET_FAILED"; case I40E_ERR_SWFW_SYNC: return "I40E_ERR_SWFW_SYNC"; case 
I40E_ERR_NO_AVAILABLE_VSI: return "I40E_ERR_NO_AVAILABLE_VSI"; case I40E_ERR_NO_MEMORY: return "I40E_ERR_NO_MEMORY"; case I40E_ERR_BAD_PTR: return "I40E_ERR_BAD_PTR"; case I40E_ERR_RING_FULL: return "I40E_ERR_RING_FULL"; case I40E_ERR_INVALID_PD_ID: return "I40E_ERR_INVALID_PD_ID"; case I40E_ERR_INVALID_QP_ID: return "I40E_ERR_INVALID_QP_ID"; case I40E_ERR_INVALID_CQ_ID: return "I40E_ERR_INVALID_CQ_ID"; case I40E_ERR_INVALID_CEQ_ID: return "I40E_ERR_INVALID_CEQ_ID"; case I40E_ERR_INVALID_AEQ_ID: return "I40E_ERR_INVALID_AEQ_ID"; case I40E_ERR_INVALID_SIZE: return "I40E_ERR_INVALID_SIZE"; case I40E_ERR_INVALID_ARP_INDEX: return "I40E_ERR_INVALID_ARP_INDEX"; case I40E_ERR_INVALID_FPM_FUNC_ID: return "I40E_ERR_INVALID_FPM_FUNC_ID"; case I40E_ERR_QP_INVALID_MSG_SIZE: return "I40E_ERR_QP_INVALID_MSG_SIZE"; case I40E_ERR_QP_TOOMANY_WRS_POSTED: return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; case I40E_ERR_INVALID_FRAG_COUNT: return "I40E_ERR_INVALID_FRAG_COUNT"; case I40E_ERR_QUEUE_EMPTY: return "I40E_ERR_QUEUE_EMPTY"; case I40E_ERR_INVALID_ALIGNMENT: return "I40E_ERR_INVALID_ALIGNMENT"; case I40E_ERR_FLUSHED_QUEUE: return "I40E_ERR_FLUSHED_QUEUE"; case I40E_ERR_INVALID_PUSH_PAGE_INDEX: return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; case I40E_ERR_INVALID_IMM_DATA_SIZE: return "I40E_ERR_INVALID_IMM_DATA_SIZE"; case I40E_ERR_TIMEOUT: return "I40E_ERR_TIMEOUT"; case I40E_ERR_OPCODE_MISMATCH: return "I40E_ERR_OPCODE_MISMATCH"; case I40E_ERR_CQP_COMPL_ERROR: return "I40E_ERR_CQP_COMPL_ERROR"; case I40E_ERR_INVALID_VF_ID: return "I40E_ERR_INVALID_VF_ID"; case I40E_ERR_INVALID_HMCFN_ID: return "I40E_ERR_INVALID_HMCFN_ID"; case I40E_ERR_BACKING_PAGE_ERROR: return "I40E_ERR_BACKING_PAGE_ERROR"; case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; case I40E_ERR_INVALID_PBLE_INDEX: return "I40E_ERR_INVALID_PBLE_INDEX"; case I40E_ERR_INVALID_SD_INDEX: return "I40E_ERR_INVALID_SD_INDEX"; case I40E_ERR_INVALID_PAGE_DESC_INDEX: return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; case I40E_ERR_INVALID_SD_TYPE: return "I40E_ERR_INVALID_SD_TYPE"; case I40E_ERR_MEMCPY_FAILED: return "I40E_ERR_MEMCPY_FAILED"; case I40E_ERR_INVALID_HMC_OBJ_INDEX: return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; case I40E_ERR_INVALID_HMC_OBJ_COUNT: return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; case I40E_ERR_INVALID_SRQ_ARM_LIMIT: return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; case I40E_ERR_SRQ_ENABLED: return "I40E_ERR_SRQ_ENABLED"; case I40E_ERR_ADMIN_QUEUE_ERROR: return "I40E_ERR_ADMIN_QUEUE_ERROR"; case I40E_ERR_ADMIN_QUEUE_TIMEOUT: return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; case I40E_ERR_BUF_TOO_SHORT: return "I40E_ERR_BUF_TOO_SHORT"; case I40E_ERR_ADMIN_QUEUE_FULL: return "I40E_ERR_ADMIN_QUEUE_FULL"; case I40E_ERR_ADMIN_QUEUE_NO_WORK: return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; case I40E_ERR_BAD_IWARP_CQE: return "I40E_ERR_BAD_IWARP_CQE"; case I40E_ERR_NVM_BLANK_MODE: return "I40E_ERR_NVM_BLANK_MODE"; case I40E_ERR_NOT_IMPLEMENTED: return "I40E_ERR_NOT_IMPLEMENTED"; case I40E_ERR_PE_DOORBELL_NOT_ENABLED: return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; case I40E_ERR_DIAG_TEST_FAILED: return "I40E_ERR_DIAG_TEST_FAILED"; case I40E_ERR_NOT_READY: return "I40E_ERR_NOT_READY"; case I40E_NOT_SUPPORTED: return "I40E_NOT_SUPPORTED"; case I40E_ERR_FIRMWARE_API_VERSION: return "I40E_ERR_FIRMWARE_API_VERSION"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); return hw->err_str; } /** * i40e_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer * @buf_len: max 
length of buffer * * Dumps debug log about adminq command with descriptor contents. **/ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len = LE16_TO_CPU(aq_desc->datalen); u8 *buf = (u8 *)buffer; u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", LE16_TO_CPU(aq_desc->opcode), LE16_TO_CPU(aq_desc->flags), LE16_TO_CPU(aq_desc->datalen), LE16_TO_CPU(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->cookie_high), LE32_TO_CPU(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->params.internal.param0), LE32_TO_CPU(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->params.external.addr_high), LE32_TO_CPU(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) i40e_debug(hw, mask, "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", i, buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7], buf[i+8], buf[i+9], buf[i+10], buf[i+11], buf[i+12], buf[i+13], buf[i+14], buf[i+15]); /* the most we could have left is 16 bytes, pad with zeros */ if (i < len) { char d_buf[16]; int j; memset(d_buf, 0, sizeof(d_buf)); for (j = 0; i < len; j++, i++) d_buf[j] = buf[i]; i40e_debug(hw, mask, "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", i, d_buf[0], d_buf[1], d_buf[2], d_buf[3], d_buf[4], d_buf[5], d_buf[6], d_buf[7], d_buf[8], d_buf[9], d_buf[10], d_buf[11], d_buf[12], d_buf[13], d_buf[14], d_buf[15]); } } } /** * i40e_check_asq_alive * @hw: pointer to the hw struct * * Returns TRUE if Queue is enabled else FALSE. **/ bool i40e_check_asq_alive(struct i40e_hw *hw) { if (hw->aq.asq.len) if (!i40e_is_vf(hw)) return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); if (i40e_is_vf(hw)) return !!(rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQENABLE_MASK); return FALSE; } /** * i40e_aq_queue_shutdown * @hw: pointer to the hw struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. **/ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading) { struct i40e_aq_desc desc; struct i40e_aqc_queue_shutdown *cmd = (struct i40e_aqc_queue_shutdown *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); if (unloading) cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. * * Macros are used to shorten the table lines and make this table human * readable. * * We store the PTYPE in the top byte of the bit field - this is just so that * we can check that the table doesn't have a row missing, as the index into * the table should be the PTYPE. 
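 *
 * In C terms the decode is a single array lookup.  A hedged caller sketch
 * (the ptype value is assumed to come from the Rx descriptor and is not
 * defined in this file):
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
 *	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
 *		... IPv4 outer header: consult tunnel_type/inner_prot ...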
* * Typical work flow: * * IF NOT i40e_ptype_lookup[ptype].known * THEN * Packet is unknown * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP * Use the rest of the fields to look at the tunnels, inner protocols, etc * ELSE * Use the enum i40e_rx_l2_ptype to decode the packet type * ENDIF */ /* macro to make the table lines short */ #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ { PTYPE, \ 1, \ I40E_RX_PTYPE_OUTER_##OUTER_IP, \ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ I40E_RX_PTYPE_##OUTER_FRAG, \ I40E_RX_PTYPE_TUNNEL_##T, \ I40E_RX_PTYPE_TUNNEL_END_##TE, \ I40E_RX_PTYPE_##TEF, \ I40E_RX_PTYPE_INNER_PROT_##I, \ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } #define I40E_PTT_UNUSED_ENTRY(PTYPE) \ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* shorter macros makes the table fit but are terse */ #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC /* Lookup table mapping the HW PTYPE to the bit field for decoding */ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { /* L2 Packet types */ I40E_PTT_UNUSED_ENTRY(0), I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(4), I40E_PTT_UNUSED_ENTRY(5), I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(8), I40E_PTT_UNUSED_ENTRY(9), I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), /* Non Tunneled IPv4 */ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(25), I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv4 --> IPv4 */ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(32), I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv4 --> IPv6 */ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(39), I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT */ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT 
--> IPv4 */ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(47), I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> IPv6 */ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(54), I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC */ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(62), I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(69), I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC/VLAN */ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(77), I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(84), I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* Non Tunneled IPv6 */ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), I40E_PTT_UNUSED_ENTRY(91), I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv6 --> IPv4 */ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(97, IP, IPV6, NOF, 
IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(98), I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv6 --> IPv6 */ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(105), I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT */ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> IPv4 */ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(113), I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> IPv6 */ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(120), I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC */ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(128), I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(135), I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN */ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(143), I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), 
I40E_PTT_UNUSED_ENTRY(150), I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* unused entries */ I40E_PTT_UNUSED_ENTRY(154), I40E_PTT_UNUSED_ENTRY(155), I40E_PTT_UNUSED_ENTRY(156), I40E_PTT_UNUSED_ENTRY(157), I40E_PTT_UNUSED_ENTRY(158), I40E_PTT_UNUSED_ENTRY(159), I40E_PTT_UNUSED_ENTRY(160), I40E_PTT_UNUSED_ENTRY(161), I40E_PTT_UNUSED_ENTRY(162), I40E_PTT_UNUSED_ENTRY(163), I40E_PTT_UNUSED_ENTRY(164), I40E_PTT_UNUSED_ENTRY(165), I40E_PTT_UNUSED_ENTRY(166), I40E_PTT_UNUSED_ENTRY(167), I40E_PTT_UNUSED_ENTRY(168), I40E_PTT_UNUSED_ENTRY(169), I40E_PTT_UNUSED_ENTRY(170), I40E_PTT_UNUSED_ENTRY(171), I40E_PTT_UNUSED_ENTRY(172), I40E_PTT_UNUSED_ENTRY(173), I40E_PTT_UNUSED_ENTRY(174), I40E_PTT_UNUSED_ENTRY(175), I40E_PTT_UNUSED_ENTRY(176), I40E_PTT_UNUSED_ENTRY(177), I40E_PTT_UNUSED_ENTRY(178), I40E_PTT_UNUSED_ENTRY(179), I40E_PTT_UNUSED_ENTRY(180), I40E_PTT_UNUSED_ENTRY(181), I40E_PTT_UNUSED_ENTRY(182), I40E_PTT_UNUSED_ENTRY(183), I40E_PTT_UNUSED_ENTRY(184), I40E_PTT_UNUSED_ENTRY(185), I40E_PTT_UNUSED_ENTRY(186), I40E_PTT_UNUSED_ENTRY(187), I40E_PTT_UNUSED_ENTRY(188), I40E_PTT_UNUSED_ENTRY(189), I40E_PTT_UNUSED_ENTRY(190), I40E_PTT_UNUSED_ENTRY(191), I40E_PTT_UNUSED_ENTRY(192), I40E_PTT_UNUSED_ENTRY(193), I40E_PTT_UNUSED_ENTRY(194), I40E_PTT_UNUSED_ENTRY(195), I40E_PTT_UNUSED_ENTRY(196), I40E_PTT_UNUSED_ENTRY(197), I40E_PTT_UNUSED_ENTRY(198), I40E_PTT_UNUSED_ENTRY(199), I40E_PTT_UNUSED_ENTRY(200), I40E_PTT_UNUSED_ENTRY(201), I40E_PTT_UNUSED_ENTRY(202), I40E_PTT_UNUSED_ENTRY(203), I40E_PTT_UNUSED_ENTRY(204), I40E_PTT_UNUSED_ENTRY(205), I40E_PTT_UNUSED_ENTRY(206), I40E_PTT_UNUSED_ENTRY(207), I40E_PTT_UNUSED_ENTRY(208), I40E_PTT_UNUSED_ENTRY(209), I40E_PTT_UNUSED_ENTRY(210), I40E_PTT_UNUSED_ENTRY(211), I40E_PTT_UNUSED_ENTRY(212), I40E_PTT_UNUSED_ENTRY(213), I40E_PTT_UNUSED_ENTRY(214), I40E_PTT_UNUSED_ENTRY(215), I40E_PTT_UNUSED_ENTRY(216), I40E_PTT_UNUSED_ENTRY(217), I40E_PTT_UNUSED_ENTRY(218), I40E_PTT_UNUSED_ENTRY(219), I40E_PTT_UNUSED_ENTRY(220), I40E_PTT_UNUSED_ENTRY(221), I40E_PTT_UNUSED_ENTRY(222), I40E_PTT_UNUSED_ENTRY(223), I40E_PTT_UNUSED_ENTRY(224), I40E_PTT_UNUSED_ENTRY(225), I40E_PTT_UNUSED_ENTRY(226), I40E_PTT_UNUSED_ENTRY(227), I40E_PTT_UNUSED_ENTRY(228), I40E_PTT_UNUSED_ENTRY(229), I40E_PTT_UNUSED_ENTRY(230), I40E_PTT_UNUSED_ENTRY(231), I40E_PTT_UNUSED_ENTRY(232), I40E_PTT_UNUSED_ENTRY(233), I40E_PTT_UNUSED_ENTRY(234), I40E_PTT_UNUSED_ENTRY(235), I40E_PTT_UNUSED_ENTRY(236), I40E_PTT_UNUSED_ENTRY(237), I40E_PTT_UNUSED_ENTRY(238), I40E_PTT_UNUSED_ENTRY(239), I40E_PTT_UNUSED_ENTRY(240), I40E_PTT_UNUSED_ENTRY(241), I40E_PTT_UNUSED_ENTRY(242), I40E_PTT_UNUSED_ENTRY(243), I40E_PTT_UNUSED_ENTRY(244), I40E_PTT_UNUSED_ENTRY(245), I40E_PTT_UNUSED_ENTRY(246), I40E_PTT_UNUSED_ENTRY(247), I40E_PTT_UNUSED_ENTRY(248), I40E_PTT_UNUSED_ENTRY(249), I40E_PTT_UNUSED_ENTRY(250), I40E_PTT_UNUSED_ENTRY(251), I40E_PTT_UNUSED_ENTRY(252), I40E_PTT_UNUSED_ENTRY(253), I40E_PTT_UNUSED_ENTRY(254), I40E_PTT_UNUSED_ENTRY(255) }; /** * i40e_validate_mac_addr - Validate unicast MAC address * @mac_addr: pointer to MAC address * * Tests a MAC address to ensure it is a valid Individual Address **/ enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr) { enum i40e_status_code status = I40E_SUCCESS; DEBUGFUNC("i40e_validate_mac_addr"); /* Broadcast addresses ARE multicast addresses * Make sure it is not a multicast address * Reject the zero address */ 
if (I40E_IS_MULTICAST(mac_addr) || (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)) status = I40E_ERR_INVALID_MAC_ADDR; return status; } /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure * * This assigns the MAC type and PHY code and inits the NVM. * Does not touch the hardware. This function must be called prior to any * other function in the shared code. The i40e_hw structure should be * memset to 0 prior to calling this function. The following fields in * hw structure should be filled in prior to calling this function: * hw_addr, back, device_id, vendor_id, subsystem_device_id, * subsystem_vendor_id, and revision_id **/ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw) { enum i40e_status_code status = I40E_SUCCESS; u32 port, ari, func_rid; DEBUGFUNC("i40e_init_shared_code"); i40e_set_mac_type(hw); switch (hw->mac.type) { case I40E_MAC_XL710: break; default: return I40E_ERR_DEVICE_NOT_SUPPORTED; } hw->phy.get_link_info = TRUE; /* Determine port number and PF number*/ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; hw->port = (u8)port; ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; func_rid = rd32(hw, I40E_PF_FUNC_RID); if (ari) hw->pf_id = (u8)(func_rid & 0xff); else hw->pf_id = (u8)(func_rid & 0x7); status = i40e_init_nvm(hw); return status; } /** * i40e_aq_mac_address_read - Retrieve the MAC addresses * @hw: pointer to the hw struct * @flags: a return indicator of what addresses were added to the addr store * @addrs: the requestor's mac addr store * @cmd_details: pointer to command details structure or NULL **/ static enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw, u16 *flags, struct i40e_aqc_mac_address_read_data *addrs, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_read *cmd_data = (struct i40e_aqc_mac_address_read *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, addrs, sizeof(*addrs), cmd_details); *flags = LE16_TO_CPU(cmd_data->command_flags); return status; } /** * i40e_aq_mac_address_write - Change the MAC addresses * @hw: pointer to the hw struct * @flags: indicates which MAC to be written * @mac_addr: address to write * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, u16 flags, u8 *mac_addr, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_write *cmd_data = (struct i40e_aqc_mac_address_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_write); cmd_data->command_flags = CPU_TO_LE16(flags); cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]); cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) | ((u32)mac_addr[3] << 16) | ((u32)mac_addr[4] << 8) | mac_addr[5]); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_get_mac_addr - get MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to MAC address * * Reads the adapter's MAC address from register **/ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct 
i40e_aqc_mac_address_read_data addrs; enum i40e_status_code status; u16 flags = 0; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (flags & I40E_AQC_LAN_ADDR_VALID) memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac)); return status; } /** * i40e_get_port_mac_addr - get Port MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to Port MAC address * * Reads the adapter's Port MAC address **/ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; enum i40e_status_code status; u16 flags = 0; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (status) return status; if (flags & I40E_AQC_PORT_ADDR_VALID) memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac)); else status = I40E_ERR_INVALID_MAC_ADDR; return status; } /** * i40e_pre_tx_queue_cfg - pre tx queue configure * @hw: pointer to the HW structure * @queue: target pf queue index * @enable: state change request * * Handles hw requirement to indicate intention to enable * or disable target queue. **/ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) { u32 abs_queue_idx = hw->func_caps.base_queue + queue; u32 reg_block = 0; u32 reg_val; if (abs_queue_idx >= 128) { reg_block = abs_queue_idx / 128; abs_queue_idx %= 128; } reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); if (enable) reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; else reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); } /** * i40e_read_pba_string - Reads part number string from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. 
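 *
 * Hedged usage sketch (the buffer size is illustrative; it must hold two
 * bytes per PBA data word plus a terminating NUL, or I40E_ERR_PARAM is
 * returned):
 *
 *	u8 pba_num[32];
 *
 *	if (i40e_read_pba_string(hw, pba_num, sizeof(pba_num)) == I40E_SUCCESS)
 *		... pba_num now holds a NUL-terminated part number string ...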
**/ enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size) { enum i40e_status_code status = I40E_SUCCESS; u16 pba_word = 0; u16 pba_size = 0; u16 pba_ptr = 0; u16 i = 0; status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) { DEBUGOUT("Failed to read PBA flags or flag is invalid.\n"); return status; } status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); if (status != I40E_SUCCESS) { DEBUGOUT("Failed to read PBA Block pointer.\n"); return status; } status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); if (status != I40E_SUCCESS) { DEBUGOUT("Failed to read PBA Block size.\n"); return status; } /* Subtract one to get PBA word count (PBA Size word is included in * total size) */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { DEBUGOUT("Buffer to small for PBA data.\n"); return I40E_ERR_PARAM; } for (i = 0; i < pba_size; i++) { status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); if (status != I40E_SUCCESS) { DEBUGOUT1("Failed to read PBA Block word %d.\n", i); return status; } pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; pba_num[(i * 2) + 1] = pba_word & 0xFF; } pba_num[(pba_size * 2)] = '\0'; return status; } /** * i40e_get_media_type - Gets media type * @hw: pointer to the hardware structure **/ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) { enum i40e_media_type media; switch (hw->phy.link_info.phy_type) { case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: case I40E_PHY_TYPE_40GBASE_SR4: case I40E_PHY_TYPE_40GBASE_LR4: media = I40E_MEDIA_TYPE_FIBER; break; case I40E_PHY_TYPE_100BASE_TX: case I40E_PHY_TYPE_1000BASE_T: case I40E_PHY_TYPE_10GBASE_T: media = I40E_MEDIA_TYPE_BASET; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_40GBASE_CR4_CU: case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_40GBASE_AOC: case I40E_PHY_TYPE_10GBASE_AOC: media = I40E_MEDIA_TYPE_DA; break; case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_40GBASE_KR4: case I40E_PHY_TYPE_20GBASE_KR2: media = I40E_MEDIA_TYPE_BACKPLANE; break; case I40E_PHY_TYPE_SGMII: case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_XLAUI: case I40E_PHY_TYPE_XLPPI: default: media = I40E_MEDIA_TYPE_UNKNOWN; break; } return media; } #define I40E_PF_RESET_WAIT_COUNT 200 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure * * Assuming someone else has triggered a global reset, * assure the global reset is complete and then reset the PF **/ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw) { u32 cnt = 0; u32 cnt1 = 0; u32 reg = 0; u32 grst_del; /* Poll for Global Reset steady state in case of recent GRST. * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. 
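	 * For example, a GRSTDEL field value of 10 means the loop below
	 * polls for up to (10 + 10) * 100ms = 2 seconds before giving up.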
*/ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; for (cnt = 0; cnt < grst_del + 10; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; i40e_msec_delay(100); } if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { DEBUGOUT("Global reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } /* Now Wait for the FW to be ready */ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { reg = rd32(hw, I40E_GLNVM_ULD); reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { DEBUGOUT1("Core and Global modules ready %d\n", cnt1); break; } i40e_msec_delay(10); } if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { DEBUGOUT("wait for FW Reset complete timedout\n"); DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg); return I40E_ERR_RESET_FAILED; } /* If there was a Global Reset in progress when we got here, * we don't need to do the PF Reset */ if (!cnt) { reg = rd32(hw, I40E_PFGEN_CTRL); wr32(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, I40E_PFGEN_CTRL); if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; i40e_msec_delay(1); } if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { DEBUGOUT("PF reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } } i40e_clear_pxe_mode(hw); return I40E_SUCCESS; } /** * i40e_clear_hw - clear out any left over hw state * @hw: pointer to the hw struct * * Clear queues and interrupts, typically called at init time, * but after the capabilities have been found so we know how many * queues and msix vectors have been allocated. 
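 *
 * Ordering sketch (illustrative only; the capability-discovery step is
 * assumed to have filled hw->func_caps before this is called):
 *
 *	... discover capabilities, populating hw->func_caps ...
 *	i40e_clear_hw(hw);
 *	i40e_pf_reset(hw);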
**/ void i40e_clear_hw(struct i40e_hw *hw) { u32 num_queues, base_queue; u32 num_pf_int; u32 num_vf_int; u32 num_vfs; u32 i, j; u32 val; u32 eol = 0x7ff; /* get number of interrupts, queues, and vfs */ val = rd32(hw, I40E_GLPCI_CNF2); num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; val = rd32(hw, I40E_PFLAN_QALLOC); base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> I40E_PFLAN_QALLOC_LASTQ_SHIFT; if (val & I40E_PFLAN_QALLOC_VALID_MASK) num_queues = (j - base_queue) + 1; else num_queues = 0; val = rd32(hw, I40E_PF_VT_PFALLOC); i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> I40E_PF_VT_PFALLOC_LASTVF_SHIFT; if (val & I40E_PF_VT_PFALLOC_VALID_MASK) num_vfs = (j - i) + 1; else num_vfs = 0; /* stop all the interrupts */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; for (i = 0; i < num_pf_int - 2; i++) wr32(hw, I40E_PFINT_DYN_CTLN(i), val); /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); for (i = 0; i < num_pf_int - 2; i++) wr32(hw, I40E_PFINT_LNKLSTN(i), val); val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; for (i = 0; i < num_vfs; i++) wr32(hw, I40E_VPINT_LNKLST0(i), val); for (i = 0; i < num_vf_int - 2; i++) wr32(hw, I40E_VPINT_LNKLSTN(i), val); /* warn the HW of the coming Tx disables */ for (i = 0; i < num_queues; i++) { u32 abs_queue_idx = base_queue + i; u32 reg_block = 0; if (abs_queue_idx >= 128) { reg_block = abs_queue_idx / 128; abs_queue_idx %= 128; } val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); } i40e_usec_delay(400); /* stop all the queues */ for (i = 0; i < num_queues; i++) { wr32(hw, I40E_QINT_TQCTL(i), 0); wr32(hw, I40E_QTX_ENA(i), 0); wr32(hw, I40E_QINT_RQCTL(i), 0); wr32(hw, I40E_QRX_ENA(i), 0); } /* short wait for all queue disables to settle */ i40e_usec_delay(50); } /** * i40e_clear_pxe_mode - clear pxe operations mode * @hw: pointer to the hw struct * * Make sure all PXE mode settings are cleared, including things * like descriptor fetch/write-back mode. 
**/ void i40e_clear_pxe_mode(struct i40e_hw *hw) { if (i40e_check_asq_alive(hw)) i40e_aq_clear_pxe_mode(hw, NULL); } /** * i40e_led_is_mine - helper to find matching led * @hw: pointer to the hw struct * @idx: index into GPIO registers * * returns: 0 if no match, otherwise the value of the GPIO_CTL register */ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) { u32 gpio_val = 0; u32 port; if (!hw->func_caps.led[idx]) return 0; gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; /* if PRT_NUM_NA is 1 then this LED is not port specific, OR * if it is not our port then ignore */ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || (port != hw->port)) return 0; return gpio_val; } #define I40E_COMBINED_ACTIVITY 0xA #define I40E_FILTER_ACTIVITY 0xE #define I40E_LINK_ACTIVITY 0xC #define I40E_MAC_ACTIVITY 0xD #define I40E_LED0 22 /** * i40e_led_get - return current on/off mode * @hw: pointer to the hw struct * * The value returned is the 'mode' field as defined in the * GPIO register definitions: 0x0 = off, 0xf = on, and other * values are variations of possible behaviors relating to * blink, link, and wire. **/ u32 i40e_led_get(struct i40e_hw *hw) { u32 current_mode = 0; u32 mode = 0; int i; /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; /* ignore gpio LED src mode entries related to the activity * LEDs */ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); switch (current_mode) { case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: case I40E_MAC_ACTIVITY: continue; default: break; } mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; } return mode; } /** * i40e_led_set - set new on/off mode * @hw: pointer to the hw struct * @mode: 0=off, 0xf=on (else see manual for mode details) * @blink: TRUE if the LED should blink when on, FALSE if steady * * if this function is used to turn on the blink it should * be used to disable the blink when restoring the original state. 
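 *
 * Hedged "identify port" sketch built on i40e_led_get()/i40e_led_set()
 * (0xf = on, per the mode description in i40e_led_get() above; the saved
 * mode is restored with blink turned back off):
 *
 *	u32 orig_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, TRUE);
 *	... let the user locate the blinking port ...
 *	i40e_led_set(hw, orig_mode, FALSE);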
**/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { u32 current_mode = 0; int i; if (mode & 0xfffffff0) DEBUGOUT1("invalid mode passed in %X\n", mode); /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; /* ignore gpio LED src mode entries related to the activity * LEDs */ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); switch (current_mode) { case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: case I40E_MAC_ACTIVITY: continue; default: break; } gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); if (mode == I40E_LINK_ACTIVITY) blink = FALSE; if (blink) gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); break; } } /* Admin command wrappers */ /** * i40e_aq_get_phy_capabilities * @hw: pointer to the hw struct * @abilities: structure for PHY capabilities to be filled * @qualified_modules: report Qualified Modules * @report_init: report init capabilities (active are default) * @cmd_details: pointer to command details structure or NULL * * Returns the various PHY abilities supported on the Port. **/ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); if (!abilities) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_abilities); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (abilities_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); if (qualified_modules) desc.params.external.param0 |= CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); if (report_init) desc.params.external.param0 |= CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, cmd_details); if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) status = I40E_ERR_UNKNOWN_PHY; if (report_init) hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type); return status; } /** * i40e_aq_set_phy_config * @hw: pointer to the hw struct * @config: structure with PHY configuration to be set * @cmd_details: pointer to command details structure or NULL * * Set the various PHY configuration parameters * supported on the Port.One or more of the Set PHY config parameters may be * ignored in an MFP mode as the PF may not have the privilege to set some * of the PHY Config parameters. This status will be indicated by the * command response. 
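 *
 * The usual pattern is read-modify-write, as i40e_set_fc() below
 * demonstrates: fetch the current abilities with
 * i40e_aq_get_phy_capabilities(), copy the relevant fields into a
 * struct i40e_aq_set_phy_config, change only the bits of interest, then
 * call this function.  Sketch (error handling omitted):
 *
 *	struct i40e_aq_get_phy_abilities_resp abilities;
 *	struct i40e_aq_set_phy_config config;
 *
 *	i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
 *	memset(&config, 0, sizeof(config));
 *	config.phy_type = abilities.phy_type;
 *	config.link_speed = abilities.link_speed;
 *	config.abilities = abilities.abilities |
 *			   I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
 *	i40e_aq_set_phy_config(hw, &config, NULL);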
**/ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_aq_set_phy_config *config, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_phy_config *cmd = (struct i40e_aq_set_phy_config *)&desc.params.raw; enum i40e_status_code status; if (!config) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_config); *cmd = *config; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_set_fc * @hw: pointer to the hw struct * * Set the requested flow control mode using set_phy_config. **/ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_restart) { enum i40e_fc_mode fc_mode = hw->fc.requested_mode; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; enum i40e_status_code status; u8 pause_mask = 0x0; *aq_failures = 0x0; switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; break; case I40E_FC_RX_PAUSE: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; break; case I40E_FC_TX_PAUSE: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; break; default: break; } /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities, NULL); if (status) { *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; return status; } memset(&config, 0, sizeof(config)); /* clear the old pause settings */ config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & ~(I40E_AQ_PHY_FLAG_PAUSE_RX); /* set the new abilities */ config.abilities |= pause_mask; /* If the abilities have changed, then set the new config */ if (config.abilities != abilities.abilities) { /* Auto restart link so settings take effect */ if (atomic_restart) config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* Copy over all the old settings */ config.phy_type = abilities.phy_type; config.link_speed = abilities.link_speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ status = i40e_update_link_info(hw); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ i40e_msec_delay(1000); status = i40e_update_link_info(hw); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; return status; } /** * i40e_aq_set_mac_config * @hw: pointer to the hw struct * @max_frame_size: Maximum Frame Size to be supported by the port * @crc_en: Tell HW to append a CRC to outgoing frames * @pacing: Pacing configurations * @cmd_details: pointer to command details structure or NULL * * Configure MAC settings for frame size, jumbo frame support and the * addition of a CRC by the hardware. 
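 *
 * Hedged call sketch (the 9728-byte jumbo frame size and the pacing value
 * of 0 are illustrative, not values mandated by this file):
 *
 *	i40e_aq_set_mac_config(hw, 9728, TRUE, 0, NULL);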
**/ enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size, bool crc_en, u16 pacing, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_mac_config *cmd = (struct i40e_aq_set_mac_config *)&desc.params.raw; enum i40e_status_code status; if (max_frame_size == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_mac_config); cmd->max_frame_size = CPU_TO_LE16(max_frame_size); cmd->params = ((u8)pacing & 0x0F) << 3; if (crc_en) cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_clear_pxe_mode * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Tell the firmware that the driver is taking over from PXE **/ enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { enum i40e_status_code status; struct i40e_aq_desc desc; struct i40e_aqc_clear_pxe *cmd = (struct i40e_aqc_clear_pxe *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_clear_pxe_mode); cmd->rx_cnt = 0x2; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); wr32(hw, I40E_GLLAN_RCTL_0, 0x1); return status; } /** * i40e_aq_set_link_restart_an * @hw: pointer to the hw struct * @enable_link: if TRUE: enable link, if FALSE: disable link * @cmd_details: pointer to command details structure or NULL * * Sets up the link and restarts the Auto-Negotiation over the link. **/ enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, bool enable_link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_link_restart_an *cmd = (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_link_restart_an); cmd->command = I40E_AQ_PHY_RESTART_AN; if (enable_link) cmd->command |= I40E_AQ_PHY_LINK_ENABLE; else cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_link_info * @hw: pointer to the hw struct * @enable_lse: enable/disable LinkStatusEvent reporting * @link: pointer to link status structure - optional * @cmd_details: pointer to command details structure or NULL * * Returns the link status of the adapter. 
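 *
 * Hedged usage sketch (I40E_AQ_LINK_UP is the link-up bit reported in
 * link_info):
 *
 *	struct i40e_link_status link;
 *
 *	if (i40e_aq_get_link_info(hw, TRUE, &link, NULL) == I40E_SUCCESS &&
 *	    (link.link_info & I40E_AQ_LINK_UP))
 *		... link is up; speed is in link.link_speed ...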
**/ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_link_status *resp = (struct i40e_aqc_get_link_status *)&desc.params.raw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; enum i40e_status_code status; bool tx_pause, rx_pause; u16 command_flags; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); if (enable_lse) command_flags = I40E_AQ_LSE_ENABLE; else command_flags = I40E_AQ_LSE_DISABLE; resp->command_flags = CPU_TO_LE16(command_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_link_info_exit; /* save off old link status information */ i40e_memcpy(&hw->phy.link_info_old, hw_link_info, sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; hw->phy.media_type = i40e_get_media_type(hw); hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; hw_link_info->link_info = resp->link_info; hw_link_info->an_info = resp->an_info; hw_link_info->ext_info = resp->ext_info; hw_link_info->loopback = resp->loopback; hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size); hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; /* update fc info */ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); if (tx_pause & rx_pause) hw->fc.current_mode = I40E_FC_FULL; else if (tx_pause) hw->fc.current_mode = I40E_FC_TX_PAUSE; else if (rx_pause) hw->fc.current_mode = I40E_FC_RX_PAUSE; else hw->fc.current_mode = I40E_FC_NONE; if (resp->config & I40E_AQ_CONFIG_CRC_ENA) hw_link_info->crc_enable = TRUE; else hw_link_info->crc_enable = FALSE; if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE)) hw_link_info->lse_enable = TRUE; else hw_link_info->lse_enable = FALSE; if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; /* save link status information */ if (link) i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); /* flag cleared so helper functions don't call AQ again */ hw->phy.get_link_info = FALSE; aq_get_link_info_exit: return status; } /** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct * @mask: interrupt mask to be set * @cmd_details: pointer to command details structure or NULL * * Set link interrupt mask. **/ enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_int_mask *cmd = (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_int_mask); cmd->event_mask = CPU_TO_LE16(mask); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_local_advt_reg * @hw: pointer to the hw struct * @advt_reg: local AN advertisement register value * @cmd_details: pointer to command details structure or NULL * * Get the Local AN advertisement register value. 
**/ enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *resp = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_local_advt_reg); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_local_advt_reg_exit; *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); aq_get_local_advt_reg_exit: return status; } /** * i40e_aq_set_local_advt_reg * @hw: pointer to the hw struct * @advt_reg: local AN advertisement register value * @cmd_details: pointer to command details structure or NULL * * Get the Local AN advertisement register value. **/ enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *cmd = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_local_advt_reg); cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg)); cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg)); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_partner_advt * @hw: pointer to the hw struct * @advt_reg: AN partner advertisement register value * @cmd_details: pointer to command details structure or NULL * * Get the link partner AN advertisement register value. **/ enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *resp = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_partner_advt); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_partner_advt_exit; *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); aq_get_partner_advt_exit: return status; } /** * i40e_aq_set_lb_modes * @hw: pointer to the hw struct * @lb_modes: loopback mode to be set * @cmd_details: pointer to command details structure or NULL * * Sets loopback modes. **/ enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_lb_mode *cmd = (struct i40e_aqc_set_lb_mode *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes); cmd->lb_mode = CPU_TO_LE16(lb_modes); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_phy_debug * @hw: pointer to the hw struct * @cmd_flags: debug command flags * @cmd_details: pointer to command details structure or NULL * * Reset the external PHY. 
**/ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_debug *cmd = (struct i40e_aqc_set_phy_debug *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_debug); cmd->command_flags = cmd_flags; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Add a VSI context to the hardware. **/ enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vsi); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid); cmd->connection_type = vsi_ctx->connection_type; cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); if (status != I40E_SUCCESS) goto aq_add_vsi_exit; vsi_ctx->seid = LE16_TO_CPU(resp->seid); vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); aq_add_vsi_exit: return status; } /** * i40e_aq_set_default_vsi * @hw: pointer to the hw struct * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_unicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set unicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_multicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set multicast promiscuous enable/disable * 
@cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_mc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = CPU_TO_LE16(seid); cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_uc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); cmd->seid = CPU_TO_LE16(seid); cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_broadcast * @hw: pointer to the hw struct * @seid: vsi number * @set_filter: TRUE to set filter, FALSE to clear filter * @cmd_details: pointer to command details structure or NULL * * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
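 *
 * Example (illustrative sketch): a caller holding a valid "hw" pointer and a
 * VSI SEID (the "vsi_seid" local below is hypothetical) could enable
 * broadcast acceptance roughly as follows:
 *
 *	enum i40e_status_code ret;
 *
 *	ret = i40e_aq_set_vsi_broadcast(hw, vsi_seid, TRUE, NULL);
 *	if (ret != I40E_SUCCESS)
 *		i40e_debug(hw, I40E_DEBUG_ALL,
 *			   "set broadcast promiscuous failed: %d\n", ret);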
**/ enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 seid, bool set_filter, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set_filter) cmd->promiscuous_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); else cmd->promiscuous_flags &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_vsi_parameters); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), NULL); if (status != I40E_SUCCESS) goto aq_get_vsi_params_exit; vsi_ctx->seid = LE16_TO_CPU(resp->seid); vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); aq_get_vsi_params_exit: return status; } /** * i40e_aq_update_vsi_params * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Update a VSI context.
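 *
 * Example (illustrative sketch): an update is normally a read-modify-write of
 * the context returned by i40e_aq_get_vsi_params(); "ctx" and "vsi_seid"
 * below are hypothetical locals:
 *
 *	struct i40e_vsi_context ctx;
 *	enum i40e_status_code ret;
 *
 *	memset(&ctx, 0, sizeof(ctx));
 *	ctx.seid = vsi_seid;
 *	ret = i40e_aq_get_vsi_params(hw, &ctx, NULL);
 *	if (ret == I40E_SUCCESS) {
 *		... adjust the desired ctx.info fields here ...
 *		ret = i40e_aq_update_vsi_params(hw, &ctx, NULL);
 *	}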
**/ enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_vsi_parameters); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); return status; } /** * i40e_aq_get_switch_config * @hw: pointer to the hardware structure * @buf: pointer to the result buffer * @buf_size: length of input buffer * @start_seid: seid to start for the report, 0 == beginning * @cmd_details: pointer to command details structure or NULL * * Fill the buf with switch configuration returned from AdminQ command **/ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *scfg = (struct i40e_aqc_switch_seid *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_config); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); scfg->seid = CPU_TO_LE16(*start_seid); status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); *start_seid = LE16_TO_CPU(scfg->seid); return status; } /** * i40e_aq_get_firmware_version * @hw: pointer to the hw struct * @fw_major_version: firmware major version * @fw_minor_version: firmware minor version * @fw_build: firmware build number * @api_major_version: major queue version * @api_minor_version: minor queue version * @cmd_details: pointer to command details structure or NULL * * Get the firmware version from the admin queue commands **/ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_version *resp = (struct i40e_aqc_get_version *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) { if (fw_major_version != NULL) *fw_major_version = LE16_TO_CPU(resp->fw_major); if (fw_minor_version != NULL) *fw_minor_version = LE16_TO_CPU(resp->fw_minor); if (fw_build != NULL) *fw_build = LE32_TO_CPU(resp->fw_build); if (api_major_version != NULL) *api_major_version = LE16_TO_CPU(resp->api_major); if (api_minor_version != NULL) *api_minor_version = LE16_TO_CPU(resp->api_minor); /* A workaround to fix the API version in SW */ if (api_major_version && api_minor_version && fw_major_version && fw_minor_version && ((*api_major_version == 1) && (*api_minor_version == 1)) && (((*fw_major_version == 4) && (*fw_minor_version >= 2)) || (*fw_major_version > 4))) *api_minor_version = 2; } return status; } /** * i40e_aq_send_driver_version * @hw: pointer to the hw struct * @dv: driver's major, minor version * @cmd_details: pointer to command details structure or NULL * * Send the driver version to the firmware **/ enum i40e_status_code 
i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_driver_version *cmd = (struct i40e_aqc_driver_version *)&desc.params.raw; enum i40e_status_code status; u16 len; if (dv == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; cmd->driver_subbuild_ver = dv->subbuild_version; len = 0; while (len < sizeof(dv->driver_string) && (dv->driver_string[len] < 0x80) && dv->driver_string[len]) len++; status = i40e_asq_send_command(hw, &desc, dv->driver_string, len, cmd_details); return status; } /** * i40e_get_link_status - get status of the HW network link * @hw: pointer to the hw struct * @link_up: pointer to bool (TRUE/FALSE = linkup/linkdown) * * Variable link_up TRUE if link is up, FALSE if link is down. * The variable link_up is invalid if returned value of status != I40E_SUCCESS * * Side effect: LinkStatusEvent reporting becomes enabled **/ enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up) { enum i40e_status_code status = I40E_SUCCESS; if (hw->phy.get_link_info) { status = i40e_update_link_info(hw); if (status != I40E_SUCCESS) i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", status); } *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; return status; } /** * i40e_update_link_info - update status of the HW network link * @hw: pointer to the hw struct **/ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; enum i40e_status_code status = I40E_SUCCESS; status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL); if (status) return status; if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) { status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (status) return status; memcpy(hw->phy.link_info.module_type, &abilities.module_type, sizeof(hw->phy.link_info.module_type)); } return status; } /** * i40e_get_link_speed * @hw: pointer to the hw struct * * Returns the link speed of the adapter. **/ enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw) { enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN; enum i40e_status_code status = I40E_SUCCESS; if (hw->phy.get_link_info) { status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL); if (status != I40E_SUCCESS) goto i40e_link_speed_exit; } speed = hw->phy.link_info.link_speed; i40e_link_speed_exit: return speed; } /** * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC * @hw: pointer to the hw struct * @uplink_seid: the MAC or other gizmo SEID * @downlink_seid: the VSI SEID * @enabled_tc: bitmap of TCs to be enabled * @default_port: TRUE for default port VSI, FALSE for control port * @enable_l2_filtering: TRUE to add L2 filter table rules to regular forwarding rules for cloud support * @veb_seid: pointer to where to put the resulting VEB SEID * @cmd_details: pointer to command details structure or NULL * * This asks the FW to add a VEB between the uplink and downlink * elements. If the uplink SEID is 0, this will be a floating VEB.
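 *
 * Example (illustrative sketch): assuming "mac_seid" and "vsi_seid" are
 * already known to the caller, a non-floating VEB carrying only TC0 could be
 * created like this:
 *
 *	u16 veb_seid = 0;
 *	enum i40e_status_code ret;
 *
 *	ret = i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1, FALSE, FALSE,
 *			      &veb_seid, NULL);
 *	if (ret == I40E_SUCCESS)
 *		i40e_debug(hw, I40E_DEBUG_ALL, "added VEB, SEID %d\n",
 *			   veb_seid);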
**/ enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, bool default_port, bool enable_l2_filtering, u16 *veb_seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_veb *cmd = (struct i40e_aqc_add_veb *)&desc.params.raw; struct i40e_aqc_add_veb_completion *resp = (struct i40e_aqc_add_veb_completion *)&desc.params.raw; enum i40e_status_code status; u16 veb_flags = 0; /* SEIDs need to either both be set or both be 0 for floating VEB */ if (!!uplink_seid != !!downlink_seid) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); cmd->uplink_seid = CPU_TO_LE16(uplink_seid); cmd->downlink_seid = CPU_TO_LE16(downlink_seid); cmd->enable_tcs = enabled_tc; if (!uplink_seid) veb_flags |= I40E_AQC_ADD_VEB_FLOATING; if (default_port) veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; else veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; if (enable_l2_filtering) veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER; cmd->veb_flags = CPU_TO_LE16(veb_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && veb_seid) *veb_seid = LE16_TO_CPU(resp->veb_seid); return status; } /** * i40e_aq_get_veb_parameters - Retrieve VEB parameters * @hw: pointer to the hw struct * @veb_seid: the SEID of the VEB to query * @switch_id: the uplink switch id * @floating: set to TRUE if the VEB is floating * @statistic_index: index of the stats counter block for this VEB * @vebs_used: number of VEB's used by function * @vebs_free: total VEB's not reserved by any function * @cmd_details: pointer to command details structure or NULL * * This retrieves the parameters for a particular VEB, specified by * uplink_seid, and returns them to the caller. 
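 *
 * Example (illustrative sketch): querying a VEB whose SEID ("veb_seid",
 * hypothetical) was returned by i40e_aq_add_veb():
 *
 *	u16 switch_id, stat_idx, used, unallocated;
 *	bool floating;
 *
 *	if (i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id, &floating,
 *				       &stat_idx, &used, &unallocated,
 *				       NULL) == I40E_SUCCESS)
 *		i40e_debug(hw, I40E_DEBUG_ALL,
 *			   "VEB stats block %d, floating %d\n",
 *			   stat_idx, floating);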
**/ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, u16 *statistic_index, u16 *vebs_used, u16 *vebs_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_veb_parameters_completion *cmd_resp = (struct i40e_aqc_get_veb_parameters_completion *) &desc.params.raw; enum i40e_status_code status; if (veb_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_veb_parameters); cmd_resp->seid = CPU_TO_LE16(veb_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status) goto get_veb_exit; if (switch_id) *switch_id = LE16_TO_CPU(cmd_resp->switch_id); if (statistic_index) *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index); if (vebs_used) *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used); if (vebs_free) *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free); if (floating) { u16 flags = LE16_TO_CPU(cmd_resp->veb_flags); if (flags & I40E_AQC_ADD_VEB_FLOATING) *floating = TRUE; else *floating = FALSE; } get_veb_exit: return status; } /** * i40e_aq_add_macvlan * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be added * @count: length of the list * @cmd_details: pointer to command details structure or NULL * * Add MAC/VLAN addresses to the HW filtering **/ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; + int i; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; + for (i = 0; i < count; i++) + if (I40E_IS_MULTICAST(mv_list[i].mac_addr)) + mv_list[i].flags |= I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC; + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, - cmd_details); + cmd_details); return status; } /** * i40e_aq_remove_macvlan * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be removed * @count: length of the list * @cmd_details: pointer to command details structure or NULL * * Remove MAC/VLAN addresses from the HW filtering **/ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); 
status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, cmd_details); return status; } /** * i40e_aq_add_vlan - Add VLAN ids to the HW filtering * @hw: pointer to the hw struct * @seid: VSI for the vlan filters * @v_list: list of vlan filters to be added * @count: length of the list * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !v_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*v_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, v_list, buf_size, cmd_details); return status; } /** * i40e_aq_remove_vlan - Remove VLANs from the HW filtering * @hw: pointer to the hw struct * @seid: VSI for the vlan filters * @v_list: list of macvlans to be removed * @count: length of the list * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !v_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*v_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, v_list, buf_size, cmd_details); return status; } /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: vf id to send msg * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details * * send msg to vf **/ enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_pf_vf_message *cmd = (struct i40e_aqc_pf_vf_message *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); cmd->id = CPU_TO_LE32(vfid); desc.cookie_high = CPU_TO_LE32(v_opcode); desc.cookie_low = CPU_TO_LE32(v_retval); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); if (msglen) { desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(msglen); } status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); return 
status; } /** * i40e_aq_debug_read_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Read the register using the admin queue commands **/ enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd_resp = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; enum i40e_status_code status; if (reg_val == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); cmd_resp->address = CPU_TO_LE32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) { *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) | (u64)LE32_TO_CPU(cmd_resp->value_low); } return status; } /** * i40e_aq_debug_write_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Write to a register using the admin queue commands **/ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, u32 reg_addr, u64 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); cmd->address = CPU_TO_LE32(reg_addr); cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32)); cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF)); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_hmc_resource_profile * @hw: pointer to the hw struct * @profile: type of profile the HMC is to be set as * @pe_vf_enabled_count: the number of PE enabled VFs the system has * @cmd_details: pointer to command details structure or NULL * * query the HMC profile of the device. **/ enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw, enum i40e_aq_hmc_profile *profile, u8 *pe_vf_enabled_count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_get_set_hmc_resource_profile *resp = (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_query_hmc_resource_profile); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile & I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK); *pe_vf_enabled_count = resp->pe_vf_enabled & I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK; return status; } /** * i40e_aq_set_hmc_resource_profile * @hw: pointer to the hw struct * @profile: type of profile the HMC is to be set as * @pe_vf_enabled_count: the number of PE enabled VFs the system has * @cmd_details: pointer to command details structure or NULL * * set the HMC profile of the device. 
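 *
 * Example (illustrative sketch, assuming the I40E_HMC_PROFILE_DEFAULT value
 * from the i40e_aq_hmc_profile enum and a PF with no PE-enabled VFs):
 *
 *	enum i40e_status_code ret;
 *
 *	ret = i40e_aq_set_hmc_resource_profile(hw, I40E_HMC_PROFILE_DEFAULT,
 *					       0, NULL);
 *	if (ret != I40E_SUCCESS)
 *		i40e_debug(hw, I40E_DEBUG_ALL,
 *			   "set HMC profile failed: %d\n", ret);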
**/ enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, enum i40e_aq_hmc_profile profile, u8 pe_vf_enabled_count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_get_set_hmc_resource_profile *cmd = (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_hmc_resource_profile); cmd->pm_profile = (u8)profile; cmd->pe_vf_enabled = pe_vf_enabled_count; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_request_resource * @hw: pointer to the hw struct * @resource: resource id * @access: access type * @sdp_number: resource number * @timeout: the maximum time in ms that the driver may hold the resource * @cmd_details: pointer to command details structure or NULL * * requests common resource using the admin queue commands **/ enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, enum i40e_aq_resource_access_type access, u8 sdp_number, u64 *timeout, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd_resp = (struct i40e_aqc_request_resource *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_request_resource"); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); cmd_resp->resource_id = CPU_TO_LE16(resource); cmd_resp->access_type = CPU_TO_LE16(access); cmd_resp->resource_number = CPU_TO_LE32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. * If the resource is held by someone else, the command completes with * busy return value and the timeout field indicates the maximum time * the current owner of the resource has to free it. 
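 *
 * Example (illustrative sketch): callers typically bracket NVM access with an
 * acquire/release pair, assuming the I40E_NVM_RESOURCE_ID and
 * I40E_RESOURCE_READ values from i40e_type.h:
 *
 *	u64 timeout = 0;
 *
 *	if (i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *	    I40E_RESOURCE_READ, 0, &timeout, NULL) == I40E_SUCCESS) {
 *		... read the NVM ...
 *		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
 *	}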
*/ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) *timeout = LE32_TO_CPU(cmd_resp->timeout); return status; } /** * i40e_aq_release_resource * @hw: pointer to the hw struct * @resource: resource id * @sdp_number: resource number * @cmd_details: pointer to command details structure or NULL * * release common resource using the admin queue commands **/ enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, u8 sdp_number, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd = (struct i40e_aqc_request_resource *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_release_resource"); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); cmd->resource_id = CPU_TO_LE16(resource); cmd->resource_number = CPU_TO_LE32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_read_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be read (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Read the NVM using the admin queue commands **/ enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_read_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_read_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); /* If this is the last command in a series, set the proper flag. 
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); i40e_aq_read_nvm_exit: return status; } /** * i40e_aq_read_nvm_config - read an nvm config block * @hw: pointer to the hw struct * @cmd_flags: NVM access admin command bits * @field_id: field or feature id * @data: buffer for result * @buf_size: buffer size * @element_count: pointer to count of elements read by FW * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, u8 cmd_flags, u32 field_id, void *data, u16 buf_size, u16 *element_count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_config_read *cmd = (struct i40e_aqc_nvm_config_read *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->cmd_flags = CPU_TO_LE16(cmd_flags); cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id)); if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK) cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16)); else cmd->element_id_msw = 0; status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); if (!status && element_count) *element_count = LE16_TO_CPU(cmd->element_count); return status; } /** * i40e_aq_write_nvm_config - write an nvm config block * @hw: pointer to the hw struct * @cmd_flags: NVM access admin command bits * @data: buffer for result * @buf_size: buffer size * @element_count: count of elements to be written * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, u8 cmd_flags, void *data, u16 buf_size, u16 element_count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_config_write *cmd = (struct i40e_aqc_nvm_config_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->element_count = CPU_TO_LE16(element_count); cmd->cmd_flags = CPU_TO_LE16(cmd_flags); status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); return status; } /** * i40e_aq_oem_post_update - triggers an OEM specific flow after update * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH) status = I40E_ERR_NOT_IMPLEMENTED; return status; } /** * i40e_aq_erase_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in the module (expressed in 4 KB from module's beginning) * 
@length: length of the section to be erased (expressed in 4 KB) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Erase the NVM sector using the admin queue commands **/ enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_erase_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_erase_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); i40e_aq_erase_nvm_exit: return status; } /** * i40e_parse_discover_capabilities * @hw: pointer to the hw struct * @buff: pointer to a buffer containing device/function capability records * @cap_count: number of capability records in the list * @list_type_opc: type of capabilities list to parse * * Parse the device/function capabilities list. **/ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 cap_count, enum i40e_admin_queue_opc list_type_opc) { struct i40e_aqc_list_capabilities_element_resp *cap; u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; u8 major_rev; u32 i = 0; u16 id; cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) p = (struct i40e_hw_capabilities *)&hw->dev_caps; else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) p = (struct i40e_hw_capabilities *)&hw->func_caps; else return; for (i = 0; i < cap_count; i++, cap++) { id = LE16_TO_CPU(cap->id); number = LE32_TO_CPU(cap->number); logical_id = LE32_TO_CPU(cap->logical_id); phys_id = LE32_TO_CPU(cap->phys_id); major_rev = cap->major_rev; switch (id) { case I40E_AQ_CAP_ID_SWITCH_MODE: p->switch_mode = number; break; case I40E_AQ_CAP_ID_MNG_MODE: p->management_mode = number; break; case I40E_AQ_CAP_ID_NPAR_ACTIVE: p->npar_enable = number; break; case I40E_AQ_CAP_ID_OS2BMC_CAP: p->os2bmc = number; break; case I40E_AQ_CAP_ID_FUNCTIONS_VALID: p->valid_functions = number; break; case I40E_AQ_CAP_ID_SRIOV: if (number == 1) p->sr_iov_1_1 = TRUE; break; case I40E_AQ_CAP_ID_VF: p->num_vfs = number; p->vf_base_id = logical_id; break; case I40E_AQ_CAP_ID_VMDQ: if (number == 1) p->vmdq = TRUE; break; case I40E_AQ_CAP_ID_8021QBG: if (number == 1) p->evb_802_1_qbg = TRUE; break; case I40E_AQ_CAP_ID_8021QBR: if (number == 1) p->evb_802_1_qbh = TRUE; break; case I40E_AQ_CAP_ID_VSI: p->num_vsis = number; break; case I40E_AQ_CAP_ID_DCB: if (number == 1) { p->dcb = TRUE; p->enabled_tcmap = logical_id; p->maxtc = phys_id; } break; case I40E_AQ_CAP_ID_FCOE: if (number == 1) p->fcoe = TRUE; break; case I40E_AQ_CAP_ID_ISCSI: if (number == 1) p->iscsi = TRUE; break; case I40E_AQ_CAP_ID_RSS: p->rss = TRUE; p->rss_table_size = number; p->rss_table_entry_width = logical_id; break; case I40E_AQ_CAP_ID_RXQ: p->num_rx_qp = number; p->base_queue = phys_id; break; case I40E_AQ_CAP_ID_TXQ: p->num_tx_qp = number; p->base_queue = phys_id; 
break; case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; break; case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; break; case I40E_AQ_CAP_ID_FLEX10: if (major_rev == 1) { if (number == 1) { p->flex10_enable = TRUE; p->flex10_capable = TRUE; } } else { /* Capability revision >= 2 */ if (number & 1) p->flex10_enable = TRUE; if (number & 2) p->flex10_capable = TRUE; } p->flex10_mode = logical_id; p->flex10_status = phys_id; break; case I40E_AQ_CAP_ID_CEM: if (number == 1) p->mgmt_cem = TRUE; break; case I40E_AQ_CAP_ID_IWARP: if (number == 1) p->iwarp = TRUE; break; case I40E_AQ_CAP_ID_LED: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->led[phys_id] = TRUE; break; case I40E_AQ_CAP_ID_SDP: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->sdp[phys_id] = TRUE; break; case I40E_AQ_CAP_ID_MDIO: if (number == 1) { p->mdio_port_num = phys_id; p->mdio_port_mode = logical_id; } break; case I40E_AQ_CAP_ID_1588: if (number == 1) p->ieee_1588 = TRUE; break; case I40E_AQ_CAP_ID_FLOW_DIRECTOR: p->fd = TRUE; p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; break; case I40E_AQ_CAP_ID_WSR_PROT: p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; break; default: break; } } if (p->fcoe) i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */ p->fcoe = FALSE; /* count the enabled ports (aka the "not disabled" ports) */ hw->num_ports = 0; for (i = 0; i < 4; i++) { u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); u64 port_cfg = 0; /* use AQ read to get the physical register offset instead * of the port relative offset */ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) hw->num_ports++; } valid_functions = p->valid_functions; num_functions = 0; while (valid_functions) { if (valid_functions & 1) num_functions++; valid_functions >>= 1; } /* partition id is 1-based, and functions are evenly spread * across the ports as partitions */ hw->partition_id = (hw->pf_id / hw->num_ports) + 1; hw->num_partitions = num_functions / hw->num_ports; /* additional HW specific goodies that might * someday be HW version specific */ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; } /** * i40e_aq_discover_capabilities * @hw: pointer to the hw struct * @buff: a virtual buffer to hold the capabilities * @buff_size: Size of the virtual buffer * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM * @list_type_opc: capabilities type to discover - pass in the command opcode * @cmd_details: pointer to command details structure or NULL * * Get the device capabilities descriptions from the firmware **/ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, void *buff, u16 buff_size, u16 *data_size, enum i40e_admin_queue_opc list_type_opc, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_list_capabilites *cmd; struct i40e_aq_desc desc; enum i40e_status_code status = I40E_SUCCESS; cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; if (list_type_opc != i40e_aqc_opc_list_func_capabilities && list_type_opc != i40e_aqc_opc_list_dev_capabilities) { status = I40E_ERR_PARAM; goto exit; } i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); *data_size = LE16_TO_CPU(desc.datalen); if (status) goto exit; 
i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count), list_type_opc); exit: return status; } /** * i40e_aq_update_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be written (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Update the NVM using the admin queue commands **/ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_update_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_update_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); i40e_aq_update_nvm_exit: return status; } /** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct * @bridge_type: type of bridge requested * @mib_type: Local, Remote or both Local and Remote MIBs * @buff: pointer to a user supplied buffer to store the MIB block * @buff_size: size of the buffer (in bytes) * @local_len : length of the returned Local LLDP MIB * @remote_len: length of the returned Remote LLDP MIB * @cmd_details: pointer to command details structure or NULL * * Requests the complete LLDP MIB (entire packet). 
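 *
 * Example (illustrative sketch, assuming the I40E_AQ_LLDP_MIB_LOCAL and
 * I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE values from i40e_adminq_cmd.h; the
 * 512-byte buffer size is an arbitrary caller choice):
 *
 *	u8 mib[512];
 *	u16 local_len = 0, remote_len = 0;
 *	enum i40e_status_code ret;
 *
 *	ret = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *				   I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *				   &local_len, &remote_len, NULL);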
**/ enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_get_mib *cmd = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; struct i40e_aqc_lldp_get_mib *resp = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); desc.datalen = CPU_TO_LE16(buff_size); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (local_len != NULL) *local_len = LE16_TO_CPU(resp->local_len); if (remote_len != NULL) *remote_len = LE16_TO_CPU(resp->remote_len); } return status; } /** * i40e_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the hw struct * @mib_type: Local, Remote or both Local and Remote MIBs * @buff: pointer to a user supplied buffer to store the MIB block * @buff_size: size of the buffer (in bytes) * @cmd_details: pointer to command details structure or NULL * * Set the LLDP MIB. **/ enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, u8 mib_type, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_set_local_mib *cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_set_local_mib); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->type = mib_type; cmd->length = CPU_TO_LE16(buff_size); cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff)); cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff)); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_cfg_lldp_mib_change_event * @hw: pointer to the hw struct * @enable_update: Enable or Disable event posting * @cmd_details: pointer to command details structure or NULL * * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes **/ enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_update_mib *cmd = (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); if (!enable_update) cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_lldp_tlv * @hw: pointer to the hw struct * @bridge_type: type of bridge * @buff: buffer with TLV to add * @buff_size: length of the buffer * @tlv_len: length of the TLV to be added * @mib_len: length of the LLDP MIB returned in response * @cmd_details: pointer to command details structure or NULL * * Add 
the specified TLV to LLDP Local MIB for the given bridge type, * it is responsibility of the caller to make sure that the TLV is not * already present in the LLDPDU. * In return firmware will write the complete LLDP MIB with the newly * added TLV in the response buffer. **/ enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type, void *buff, u16 buff_size, u16 tlv_len, u16 *mib_len, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_add_tlv *cmd = (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff || tlv_len == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); cmd->len = CPU_TO_LE16(tlv_len); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (mib_len != NULL) *mib_len = LE16_TO_CPU(desc.datalen); } return status; } /** * i40e_aq_update_lldp_tlv * @hw: pointer to the hw struct * @bridge_type: type of bridge * @buff: buffer with TLV to update * @buff_size: size of the buffer holding original and updated TLVs * @old_len: Length of the Original TLV * @new_len: Length of the Updated TLV * @offset: offset of the updated TLV in the buff * @mib_len: length of the returned LLDP MIB * @cmd_details: pointer to command details structure or NULL * * Update the specified TLV to the LLDP Local MIB for the given bridge type. * Firmware will place the complete LLDP MIB in response buffer with the * updated TLV. **/ enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw, u8 bridge_type, void *buff, u16 buff_size, u16 old_len, u16 new_len, u16 offset, u16 *mib_len, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_update_tlv *cmd = (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff || offset == 0 || old_len == 0 || new_len == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); cmd->old_len = CPU_TO_LE16(old_len); cmd->new_offset = CPU_TO_LE16(offset); cmd->new_len = CPU_TO_LE16(new_len); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (mib_len != NULL) *mib_len = LE16_TO_CPU(desc.datalen); } return status; } /** * i40e_aq_delete_lldp_tlv * @hw: pointer to the hw struct * @bridge_type: type of bridge * @buff: pointer to a user supplied buffer that has the TLV * @buff_size: length of the buffer * @tlv_len: length of the TLV to be deleted * @mib_len: length of the returned LLDP MIB * @cmd_details: pointer to command details structure or NULL * * Delete the specified TLV from LLDP Local MIB for the given bridge type. * The firmware places the entire LLDP MIB in the response buffer. 
**/ enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw, u8 bridge_type, void *buff, u16 buff_size, u16 tlv_len, u16 *mib_len, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_add_tlv *cmd = (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->len = CPU_TO_LE16(tlv_len); cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (mib_len != NULL) *mib_len = LE16_TO_CPU(desc.datalen); } return status; } /** * i40e_aq_stop_lldp * @hw: pointer to the hw struct * @shutdown_agent: True if LLDP Agent needs to be Shutdown * @cmd_details: pointer to command details structure or NULL * * Stop or Shutdown the embedded LLDP Agent **/ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop *cmd = (struct i40e_aqc_lldp_stop *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); if (shutdown_agent) cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_start_lldp * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. 
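 *
 * Example (illustrative sketch): a driver that manages LLDP/DCBX in software
 * usually stops the firmware agent first and restarts it when done:
 *
 *	(void)i40e_aq_stop_lldp(hw, TRUE, NULL);
 *	... software LLDP/DCBX handling ...
 *	(void)i40e_aq_start_lldp(hw, NULL);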
**/ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_start *cmd = (struct i40e_aqc_lldp_start *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); cmd->command = I40E_AQ_LLDP_AGENT_START; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_cee_dcb_config * @hw: pointer to the hw struct * @buff: response buffer that stores CEE operational configuration * @buff_size: size of the buffer passed * @cmd_details: pointer to command details structure or NULL * * Get CEE DCBX mode operational configuration from firmware **/ enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, cmd_details); return status; } /** * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW * @hw: pointer to the hw struct * @start_agent: True if DCBx Agent needs to be Started * False if DCBx Agent needs to be Stopped * @cmd_details: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent **/ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, bool start_agent, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop_start_specific_agent *cmd = (struct i40e_aqc_lldp_stop_start_specific_agent *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop_start_spec_agent); if (start_agent) cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add * @header_len: length of the tunneling header length in DWords * @protocol_index: protocol index type * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_udp_tunnel *cmd = (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; struct i40e_aqc_del_udp_tunnel_completion *resp = (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); cmd->udp_port = CPU_TO_LE16(udp_port); cmd->protocol_type = protocol_index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && filter_index) *filter_index = resp->index; return status; } /** * i40e_aq_del_udp_tunnel * @hw: pointer to the hw struct * @index: filter index * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_udp_tunnel *cmd = (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); cmd->index = 
index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_switch_resource_alloc (0x0204) * @hw: pointer to the hw struct * @num_entries: pointer to u8 to store the number of resource entries returned * @buf: pointer to a user supplied buffer. This buffer must be large enough * to store the resource information for all resource types. Each * resource type is an i40e_aqc_switch_resource_alloc_data structure. * @count: size, in bytes, of the buffer provided * @cmd_details: pointer to command details structure or NULL * * Query the resources allocated to a function. **/ enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, u8 *num_entries, struct i40e_aqc_switch_resource_alloc_element_resp *buf, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_switch_resource_alloc *cmd_resp = (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw; enum i40e_status_code status; u16 length = count * sizeof(*buf); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_resource_alloc); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); if (!status && num_entries) *num_entries = cmd_resp->num_entries; return status; } /** * i40e_aq_delete_element - Delete switch element * @hw: pointer to the hw struct * @seid: the SEID to delete from the switch * @cmd_details: pointer to command details structure or NULL * * This deletes a switch element from the switch. **/ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *cmd = (struct i40e_aqc_switch_seid *)&desc.params.raw; enum i40e_status_code status; if (seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port * @hw: pointer to the hw struct * @flags: component flags * @mac_seid: uplink seid (MAC SEID) * @vsi_seid: connected vsi seid * @ret_seid: seid of the created pv component * * This instantiates an i40e port virtualizer with specified flags. * Depending on specified flags the port virtualizer can act as a * 802.1Qbr port virtualizer or a 802.1Qbg S-component.
*/ enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, u16 mac_seid, u16 vsi_seid, u16 *ret_seid) { struct i40e_aq_desc desc; struct i40e_aqc_add_update_pv *cmd = (struct i40e_aqc_add_update_pv *)&desc.params.raw; struct i40e_aqc_add_update_pv_completion *resp = (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv); cmd->command_flags = CPU_TO_LE16(flags); cmd->uplink_seid = CPU_TO_LE16(mac_seid); cmd->connected_seid = CPU_TO_LE16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (!status && ret_seid) *ret_seid = LE16_TO_CPU(resp->pv_seid); return status; } /** * i40e_aq_add_tag - Add an S/E-tag * @hw: pointer to the hw struct * @direct_to_queue: should s-tag direct flow to a specific queue * @vsi_seid: VSI SEID to use this tag * @tag: value of the tag * @queue_num: queue number, only valid is direct_to_queue is TRUE * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This associates an S- or E-tag to a VSI in the switch complex. It returns * the number of tags allocated by the PF, and the number of unallocated * tags available. **/ enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, u16 vsi_seid, u16 tag, u16 queue_num, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_tag *cmd = (struct i40e_aqc_add_tag *)&desc.params.raw; struct i40e_aqc_add_remove_tag_completion *resp = (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->tag = CPU_TO_LE16(tag); if (direct_to_queue) { cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE); cmd->queue_number = CPU_TO_LE16(queue_num); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_remove_tag - Remove an S- or E-tag * @hw: pointer to the hw struct * @vsi_seid: VSI SEID this tag is associated with * @tag: value of the S-tag to delete * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This deletes an S- or E-tag from a VSI in the switch complex. It returns * the number of tags allocated by the PF, and the number of unallocated * tags available. 
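 *
 * Example (illustrative sketch): removing an S-tag ("stag" and "vsi_seid" are
 * hypothetical locals) that was previously added with i40e_aq_add_tag():
 *
 *	u16 used = 0, unallocated = 0;
 *
 *	if (i40e_aq_remove_tag(hw, vsi_seid, stag, &used, &unallocated,
 *			       NULL) != I40E_SUCCESS)
 *		i40e_debug(hw, I40E_DEBUG_ALL, "remove tag failed\n");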
**/ enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, u16 tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_tag *cmd = (struct i40e_aqc_remove_tag *)&desc.params.raw; struct i40e_aqc_add_remove_tag_completion *resp = (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->tag = CPU_TO_LE16(tag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_add_mcast_etag - Add a multicast E-tag * @hw: pointer to the hw struct * @pv_seid: Port Virtualizer of this SEID to associate E-tag with * @etag: value of E-tag to add * @num_tags_in_buf: number of unicast E-tags in indirect buffer * @buf: address of indirect buffer * @tags_used: return value, number of E-tags in use by this port * @tags_free: return value, number of unallocated M-tags * @cmd_details: pointer to command details structure or NULL * * This associates a multicast E-tag to a port virtualizer. It will return * the number of tags allocated by the PF, and the number of unallocated * tags available. * * The indirect buffer pointed to by buf is a list of 2-byte E-tags, * num_tags_in_buf long. **/ enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid, u16 etag, u8 num_tags_in_buf, void *buf, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_mcast_etag *cmd = (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; struct i40e_aqc_add_remove_mcast_etag_completion *resp = (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; enum i40e_status_code status; u16 length = sizeof(u16) * num_tags_in_buf; if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0)) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_multicast_etag); cmd->pv_seid = CPU_TO_LE16(pv_seid); cmd->etag = CPU_TO_LE16(etag); cmd->num_unicast_etags = num_tags_in_buf; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->mcast_etags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->mcast_etags_free); } return status; } /** * i40e_aq_remove_mcast_etag - Remove a multicast E-tag * @hw: pointer to the hw struct * @pv_seid: Port Virtualizer SEID this M-tag is associated with * @etag: value of the E-tag to remove * @tags_used: return value, number of tags in use by this port * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This deletes an E-tag from the port virtualizer. It will return * the number of tags allocated by the port, and the number of unallocated * tags available. 
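 *
 * Illustrative usage sketch (hypothetical E-tag value, not part of the
 * shared code):
 *     u16 tags_used = 0, tags_free = 0;
 *     status = i40e_aq_remove_mcast_etag(hw, pv_seid, etag, &tags_used,
 *                                        &tags_free, NULL);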
**/ enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid, u16 etag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_mcast_etag *cmd = (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; struct i40e_aqc_add_remove_mcast_etag_completion *resp = (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; enum i40e_status_code status; if (pv_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_multicast_etag); cmd->pv_seid = CPU_TO_LE16(pv_seid); cmd->etag = CPU_TO_LE16(etag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->mcast_etags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->mcast_etags_free); } return status; } /** * i40e_aq_update_tag - Update an S/E-tag * @hw: pointer to the hw struct * @vsi_seid: VSI SEID using this S-tag * @old_tag: old tag value * @new_tag: new tag value * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This updates the value of the tag currently attached to this VSI * in the switch complex. It will return the number of tags allocated * by the PF, and the number of unallocated tags available. **/ enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, u16 old_tag, u16 new_tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_update_tag *cmd = (struct i40e_aqc_update_tag *)&desc.params.raw; struct i40e_aqc_update_tag_completion *resp = (struct i40e_aqc_update_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->old_tag = CPU_TO_LE16(old_tag); cmd->new_tag = CPU_TO_LE16(new_tag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs * @hw: pointer to the hw struct * @tcmap: TC map for request/release any ignore PFC condition * @request: request or release ignore PFC condition * @tcmap_ret: return TCs for which PFC is currently ignored * @cmd_details: pointer to command details structure or NULL * * This sends out request/release to ignore PFC condition for a TC. * It will return the TCs for which PFC is currently ignored. 
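 *
 * Illustrative usage sketch (hypothetical TC bitmap, not part of the
 * shared code), requesting that PFC be ignored for TC0 and TC1:
 *     u8 tcmap_ret = 0;
 *     status = i40e_aq_dcb_ignore_pfc(hw, 0x03, TRUE, &tcmap_ret, NULL);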
**/ enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_pfc_ignore *cmd_resp = (struct i40e_aqc_pfc_ignore *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc); if (request) cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET; cmd_resp->tc_bitmap = tcmap; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tcmap_ret != NULL) *tcmap_ret = cmd_resp->tc_bitmap; } return status; } /** * i40e_aq_dcb_updated - DCB Updated Command * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * When LLDP is handled in PF this command is used by the PF * to notify EMP that a DCB setting is modified. * When LLDP is handled in EMP this command is used by the PF * to notify EMP whenever one of the following parameters get * modified: * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA * - PCIRTT in PRTDCB_GENC.PCIRTT * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME. * EMP will return when the shared RPB settings have been * recomputed and modified. The retval field in the descriptor * will be set to 0 when RPB is modified. **/ enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch. * @hw: pointer to the hw struct * @seid: defines the SEID of the switch for which the stats are requested * @vlan_id: the VLAN ID for which the statistics are requested * @stat_index: index of the statistics counters block assigned to this VLAN * @cmd_details: pointer to command details structure or NULL * * XL710 supports 128 smonVlanStats counters.This command is used to * allocate a set of smonVlanStats counters to a specific VLAN in a specific * switch. **/ enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 *stat_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_statistics *cmd_resp = (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; enum i40e_status_code status; if ((seid == 0) || (stat_index == NULL)) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics); cmd_resp->seid = CPU_TO_LE16(seid); cmd_resp->vlan = CPU_TO_LE16(vlan_id); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stat_index) *stat_index = LE16_TO_CPU(cmd_resp->stat_index); return status; } /** * i40e_aq_remove_statistics - Remove a statistics block to a VLAN in a switch. * @hw: pointer to the hw struct * @seid: defines the SEID of the switch for which the stats are requested * @vlan_id: the VLAN ID for which the statistics are requested * @stat_index: index of the statistics counters block assigned to this VLAN * @cmd_details: pointer to command details structure or NULL * * XL710 supports 128 smonVlanStats counters.This command is used to * deallocate a set of smonVlanStats counters to a specific VLAN in a specific * switch. 
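 *
 * Illustrative usage sketch (not part of the shared code); the stat_index
 * passed here is the one previously returned by i40e_aq_add_statistics():
 *     status = i40e_aq_remove_statistics(hw, seid, vlan_id, stat_index,
 *                                        NULL);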
**/ enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 stat_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_statistics *cmd = (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; enum i40e_status_code status; if (seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_statistics); cmd->seid = CPU_TO_LE16(seid); cmd->vlan = CPU_TO_LE16(vlan_id); cmd->stat_index = CPU_TO_LE16(stat_index); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_port_parameters - set physical port parameters. * @hw: pointer to the hw struct * @bad_frame_vsi: defines the VSI to which bad frames are forwarded * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded * @double_vlan: if set double VLAN is enabled * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_set_port_parameters *cmd; enum i40e_status_code status; struct i40e_aq_desc desc; u16 command_flags = 0; cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_port_parameters); cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); if (save_bad_pac) command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS; if (pad_short_pac) command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS; if (double_vlan) command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA; cmd->command_flags = CPU_TO_LE16(command_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler * @hw: pointer to the hw struct * @seid: seid for the physical port/switching component/vsi * @buff: Indirect buffer to hold data parameters and response * @buff_size: Indirect buffer size * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL * * Generic command handler for Tx scheduler AQ commands **/ static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, void *buff, u16 buff_size, enum i40e_admin_queue_opc opcode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_tx_sched_ind *cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; enum i40e_status_code status; bool cmd_param_flag = FALSE; switch (opcode) { case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: case i40e_aqc_opc_configure_vsi_tc_bw: case i40e_aqc_opc_enable_switching_comp_ets: case i40e_aqc_opc_modify_switching_comp_ets: case i40e_aqc_opc_disable_switching_comp_ets: case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: case i40e_aqc_opc_configure_switching_comp_bw_config: cmd_param_flag = TRUE; break; case i40e_aqc_opc_query_vsi_bw_config: case i40e_aqc_opc_query_vsi_ets_sla_config: case i40e_aqc_opc_query_switching_comp_ets_config: case i40e_aqc_opc_query_port_ets_config: case i40e_aqc_opc_query_switching_comp_bw_config: cmd_param_flag = FALSE; break; default: return I40E_ERR_PARAM; } i40e_fill_default_direct_cmd_desc(&desc, opcode); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (cmd_param_flag) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); if 
(buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->vsi_seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit * @hw: pointer to the hw struct * @seid: VSI seid * @credit: BW limit credits (0 = disabled) * @max_credit: Max BW limit credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_credit, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_configure_vsi_bw_limit *cmd = (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_vsi_bw_limit); cmd->vsi_seid = CPU_TO_LE16(seid); cmd->credit = CPU_TO_LE16(credit); cmd->max_credit = max_credit; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit * @hw: pointer to the hw struct * @seid: switching component seid * @credit: BW limit credits (0 = disabled) * @max_bw: Max BW limit credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_bw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_configure_switching_comp_bw_limit *cmd = (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_switching_comp_bw_limit); cmd->seid = CPU_TO_LE16(seid); cmd->credit = CPU_TO_LE16(credit); cmd->max_bw = max_bw; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC * @hw: pointer to the hw struct * @seid: VSI seid * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_ets_sla_bw_limit, cmd_details); } /** * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC * @hw: pointer to the hw struct * @seid: VSI seid * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_tc_bw, cmd_details); } /** * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, struct i40e_asq_cmd_details 
*cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_switching_comp_ets_bw_limit, cmd_details); } /** * i40e_aq_query_vsi_bw_config - Query VSI BW configuration * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_bw_config, cmd_details); } /** * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration per TC * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_ets_sla_config, cmd_details); } /** * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's per TC BW config * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_ets_config, cmd_details); } /** * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration * @hw: pointer to the hw struct * @seid: seid of the VSI or switching component connected to Physical Port * @bw_data: Buffer to hold current ETS configuration for the Physical Port * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_port_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_port_ets_config, cmd_details); } /** * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's BW configuration * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_bw_config, cmd_details); } /** * i40e_validate_filter_settings * @hw: pointer to the hardware structure * @settings: Filter control settings * * Check and validate the filter control settings passed. * The function checks for the valid filter/context sizes being * passed for FCoE and PE. * * Returns I40E_SUCCESS if the values passed are valid and within * range else returns an error. 
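 *
 * The requested sizes are derived from the enum values by a left shift of
 * the corresponding base size, so each successive I40E_HASH_FILTER_SIZE_*
 * or I40E_DMA_CNTX_SIZE_* step doubles the table.  Illustrative example
 * (mirrors the computation below):
 *     fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE << settings->fcoe_filt_num;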
**/ static enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { u32 fcoe_cntx_size, fcoe_filt_size; u32 pe_cntx_size, pe_filt_size; u32 fcoe_fmax; u32 val; /* Validate FCoE settings passed */ switch (settings->fcoe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; fcoe_filt_size <<= (u32)settings->fcoe_filt_num; break; default: return I40E_ERR_PARAM; } switch (settings->fcoe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; break; default: return I40E_ERR_PARAM; } /* Validate PE settings passed */ switch (settings->pe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: case I40E_HASH_FILTER_SIZE_64K: case I40E_HASH_FILTER_SIZE_128K: case I40E_HASH_FILTER_SIZE_256K: case I40E_HASH_FILTER_SIZE_512K: case I40E_HASH_FILTER_SIZE_1M: pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; pe_filt_size <<= (u32)settings->pe_filt_num; break; default: return I40E_ERR_PARAM; } switch (settings->pe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: case I40E_DMA_CNTX_SIZE_8K: case I40E_DMA_CNTX_SIZE_16K: case I40E_DMA_CNTX_SIZE_32K: case I40E_DMA_CNTX_SIZE_64K: case I40E_DMA_CNTX_SIZE_128K: case I40E_DMA_CNTX_SIZE_256K: pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; pe_cntx_size <<= (u32)settings->pe_cntx_num; break; default: return I40E_ERR_PARAM; } /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ val = rd32(hw, I40E_GLHMC_FCOEFMAX); fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) return I40E_ERR_INVALID_SIZE; return I40E_SUCCESS; } /** * i40e_set_filter_control * @hw: pointer to the hardware structure * @settings: Filter control settings * * Set the Queue Filters for PE/FCoE and enable filters required * for a single PF. It is expected that these settings are programmed * at the driver initialization time. 
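 *
 * Illustrative usage sketch at driver init time (hypothetical sizes, not
 * part of the shared code):
 *     struct i40e_filter_control_settings settings;
 *
 *     i40e_memset(&settings, 0, sizeof(settings), I40E_NONDMA_MEM);
 *     settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *     settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *     settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *     settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *     settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *     settings.enable_fdir = TRUE;
 *     settings.enable_ethtype = TRUE;
 *     settings.enable_macvlan = TRUE;
 *     ret = i40e_set_filter_control(hw, &settings);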
**/ enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { enum i40e_status_code ret = I40E_SUCCESS; u32 hash_lut_size = 0; u32 val; if (!settings) return I40E_ERR_PARAM; /* Validate the input settings */ ret = i40e_validate_filter_settings(hw, settings); if (ret) return ret; /* Read the PF Queue Filter control register */ val = rd32(hw, I40E_PFQF_CTL_0); /* Program required PE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & I40E_PFQF_CTL_0_PEHSIZE_MASK; /* Program required PE contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & I40E_PFQF_CTL_0_PEDSIZE_MASK; /* Program required FCoE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; val |= ((u32)settings->fcoe_filt_num << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCHSIZE_MASK; /* Program required FCoE DDP contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; val |= ((u32)settings->fcoe_cntx_num << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCDSIZE_MASK; /* Program Hash LUT size for the PF */ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) hash_lut_size = 1; val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ if (settings->enable_fdir) val |= I40E_PFQF_CTL_0_FD_ENA_MASK; if (settings->enable_ethtype) val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; if (settings->enable_macvlan) val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; wr32(hw, I40E_PFQF_CTL_0, val); return I40E_SUCCESS; } /** * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter * @hw: pointer to the hw struct * @mac_addr: MAC address to use in the filter * @ethtype: Ethertype to use in the filter * @flags: Flags that needs to be applied to the filter * @vsi_seid: seid of the control VSI * @queue: VSI queue number to send the packet to * @is_add: Add control packet filter if True else remove * @stats: Structure to hold information on control filter counts * @cmd_details: pointer to command details structure or NULL * * This command will Add or Remove control packet filter for a control VSI. * In return it will update the total number of perfect filter count in * the stats member. 
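 *
 * For an in-tree caller see i40e_add_filter_to_drop_tx_flow_control_frames()
 * below, which installs a Tx drop filter for the flow control Ethertype.
 * Illustrative removal sketch (hypothetical ethtype/flags values, not part
 * of the shared code):
 *     struct i40e_control_filter_stats stats;
 *     status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype,
 *                 flags, vsi_seid, 0, FALSE, &stats, NULL);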
**/ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u8 *mac_addr, u16 ethtype, u16 flags, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_control_packet_filter *cmd = (struct i40e_aqc_add_remove_control_packet_filter *) &desc.params.raw; struct i40e_aqc_add_remove_control_packet_filter_completion *resp = (struct i40e_aqc_add_remove_control_packet_filter_completion *) &desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; if (is_add) { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_control_packet_filter); cmd->queue = CPU_TO_LE16(queue); } else { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_control_packet_filter); } if (mac_addr) i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS, I40E_NONDMA_TO_NONDMA); cmd->etype = CPU_TO_LE16(ethtype); cmd->flags = CPU_TO_LE16(flags); cmd->seid = CPU_TO_LE16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stats) { stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used); stats->etype_used = LE16_TO_CPU(resp->etype_used); stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free); stats->etype_free = LE16_TO_CPU(resp->etype_free); } return status; } /** * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control * @hw: pointer to the hw struct * @seid: VSI seid to add ethertype filter from **/ #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 seid) { u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; enum i40e_status_code status; status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, seid, 0, TRUE, NULL, NULL); if (status) DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); } /** * i40e_aq_add_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from * @filters: Buffer which contains the filters to be added * @filter_count: number of filters contained in the buffer * * Set the cloud filters for a given VSI. The contents of the * i40e_aqc_add_remove_cloud_filters_element_data are filled * in by the caller of the function. * **/ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; u16 buff_len; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_remove_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to remove cloud filters from * @filters: Buffer which contains the filters to be removed * @filter_count: number of filters contained in the buffer * * Remove the cloud filters for a given VSI. 
The contents of the * i40e_aqc_add_remove_cloud_filters_element_data are filled * in by the caller of the function. * **/ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; enum i40e_status_code status; u16 buff_len; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_alternate_write * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read * @reg_val0: value to be written under 'reg_addr0' * @reg_addr1: address of second dword to be read * @reg_val1: value to be written under 'reg_addr1' * * Write one or two dwords to alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. * **/ enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write); cmd_resp->address0 = CPU_TO_LE32(reg_addr0); cmd_resp->address1 = CPU_TO_LE32(reg_addr1); cmd_resp->data0 = CPU_TO_LE32(reg_val0); cmd_resp->data1 = CPU_TO_LE32(reg_val1); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_alternate_write_indirect * @hw: pointer to the hardware structure * @addr: address of a first register to be modified * @dw_count: number of alternate structure fields to write * @buffer: pointer to the command buffer * * Write 'dw_count' dwords from 'buffer' to alternate structure * starting at 'addr'. * **/ enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_ind_write *cmd_resp = (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; enum i40e_status_code status; if (buffer == NULL) return I40E_ERR_PARAM; /* Indirect command */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write_indirect); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (dw_count > (I40E_AQ_LARGE_BUF/4)) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd_resp->address = CPU_TO_LE32(addr); cmd_resp->length = CPU_TO_LE32(dw_count); status = i40e_asq_send_command(hw, &desc, buffer, I40E_LO_DWORD(4*dw_count), NULL); return status; } /** * i40e_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read * @reg_val0: pointer for data read from 'reg_addr0' * @reg_addr1: address of second dword to be read * @reg_val1: pointer for data read from 'reg_addr1' * * Read one or two dwords from alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer * is not passed then only register at 'reg_addr0' is read. 
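 *
 * Illustrative usage sketch (hypothetical alternate RAM dword addresses,
 * not part of the shared code); see i40e_read_bw_from_alt_ram() for an
 * in-tree caller:
 *     u32 val0 = 0, val1 = 0;
 *     status = i40e_aq_alternate_read(hw, addr0, &val0, addr1, &val1);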
* **/ enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; enum i40e_status_code status; if (reg_val0 == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); cmd_resp->address0 = CPU_TO_LE32(reg_addr0); cmd_resp->address1 = CPU_TO_LE32(reg_addr1); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (status == I40E_SUCCESS) { *reg_val0 = LE32_TO_CPU(cmd_resp->data0); if (reg_val1 != NULL) *reg_val1 = LE32_TO_CPU(cmd_resp->data1); } return status; } /** * i40e_aq_alternate_read_indirect * @hw: pointer to the hardware structure * @addr: address of the alternate structure field * @dw_count: number of alternate structure fields to read * @buffer: pointer to the command buffer * * Read 'dw_count' dwords from alternate structure starting at 'addr' and * place them in 'buffer'. The buffer should be allocated by caller. * **/ enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_ind_write *cmd_resp = (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; enum i40e_status_code status; if (buffer == NULL) return I40E_ERR_PARAM; /* Indirect command */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read_indirect); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (dw_count > (I40E_AQ_LARGE_BUF/4)) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd_resp->address = CPU_TO_LE32(addr); cmd_resp->length = CPU_TO_LE32(dw_count); status = i40e_asq_send_command(hw, &desc, buffer, I40E_LO_DWORD(4*dw_count), NULL); return status; } /** * i40e_aq_alternate_clear * @hw: pointer to the HW structure. * * Clear the alternate structures of the port from which the function * is called. * **/ enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_clear_port); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_alternate_write_done * @hw: pointer to the HW structure. * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS * @reset_needed: indicates the SW should trigger GLOBAL reset * * Indicates to the FW that alternate structures have been changed. * **/ enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, u8 bios_mode, bool *reset_needed) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write_done *cmd = (struct i40e_aqc_alternate_write_done *)&desc.params.raw; enum i40e_status_code status; if (reset_needed == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write_done); cmd->cmd_flags = CPU_TO_LE16(bios_mode); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (!status && reset_needed) *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) & I40E_AQ_ALTERNATE_RESET_NEEDED) != 0); return status; } /** * i40e_aq_set_oem_mode * @hw: pointer to the HW structure. * @oem_mode: the OEM mode to be used * * Sets the device to a specific operating mode. Currently the only supported * mode is no_clp, which causes FW to refrain from using Alternate RAM. 
* **/ enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, u8 oem_mode) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write_done *cmd = (struct i40e_aqc_alternate_write_done *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_set_mode); cmd->cmd_flags = CPU_TO_LE16(oem_mode); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_resume_port_tx * @hw: pointer to the hardware structure * @cmd_details: pointer to command details structure or NULL * * Resume port's Tx traffic **/ enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_set_pci_config_data - store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status word from PCI config space * * Stores the PCI bus info (speed, width, type) within the i40e_hw structure **/ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) { hw->bus.type = i40e_bus_type_pci_express; switch (link_status & I40E_PCI_LINK_WIDTH) { case I40E_PCI_LINK_WIDTH_1: hw->bus.width = i40e_bus_width_pcie_x1; break; case I40E_PCI_LINK_WIDTH_2: hw->bus.width = i40e_bus_width_pcie_x2; break; case I40E_PCI_LINK_WIDTH_4: hw->bus.width = i40e_bus_width_pcie_x4; break; case I40E_PCI_LINK_WIDTH_8: hw->bus.width = i40e_bus_width_pcie_x8; break; default: hw->bus.width = i40e_bus_width_unknown; break; } switch (link_status & I40E_PCI_LINK_SPEED) { case I40E_PCI_LINK_SPEED_2500: hw->bus.speed = i40e_bus_speed_2500; break; case I40E_PCI_LINK_SPEED_5000: hw->bus.speed = i40e_bus_speed_5000; break; case I40E_PCI_LINK_SPEED_8000: hw->bus.speed = i40e_bus_speed_8000; break; default: hw->bus.speed = i40e_bus_speed_unknown; break; } } /** * i40e_aq_debug_dump * @hw: pointer to the hardware structure * @cluster_id: specific cluster to dump * @table_id: table id within cluster * @start_index: index of line in the block to read * @buff_size: dump buffer size * @buff: dump buffer * @ret_buff_size: actual buffer size returned * @ret_next_table: next block to read * @ret_next_index: next index to read * * Dump internal FW/HW data for debug purposes. 
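 *
 * Illustrative usage sketch (hypothetical cluster/table ids, not part of
 * the shared code); ret_next_table/ret_next_index can be fed back in to
 * walk the remaining blocks:
 *     u16 ret_len = 0;
 *     u8 next_table = 0;
 *     u32 next_index = 0;
 *     status = i40e_aq_debug_dump(hw, cluster_id, table_id, 0, buff_size,
 *                 buff, &ret_len, &next_table, &next_index, NULL);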
* **/ enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, u8 table_id, u32 start_index, u16 buff_size, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_dump_internals *cmd = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; struct i40e_aqc_debug_dump_internals *resp = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_dump_internals); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->cluster_id = cluster_id; cmd->table_id = table_id; cmd->idx = CPU_TO_LE32(start_index); desc.datalen = CPU_TO_LE16(buff_size); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (ret_buff_size != NULL) *ret_buff_size = LE16_TO_CPU(desc.datalen); if (ret_next_table != NULL) *ret_next_table = resp->table_id; if (ret_next_index != NULL) *ret_next_index = LE32_TO_CPU(resp->idx); } return status; } /** * i40e_read_bw_from_alt_ram * @hw: pointer to the hardware structure * @max_bw: pointer for max_bw read * @min_bw: pointer for min_bw read * @min_valid: pointer for bool that is TRUE if min_bw is a valid value * @max_valid: pointer for bool that is TRUE if max_bw is a valid value * * Read bw from the alternate ram for the given pf **/ enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid) { enum i40e_status_code status; u32 max_bw_addr, min_bw_addr; /* Calculate the address of the min/max bw registers */ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + I40E_ALT_STRUCT_MAX_BW_OFFSET + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + I40E_ALT_STRUCT_MIN_BW_OFFSET + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); /* Read the bandwidths from alt ram */ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, min_bw_addr, min_bw); if (*min_bw & I40E_ALT_BW_VALID_MASK) *min_valid = TRUE; else *min_valid = FALSE; if (*max_bw & I40E_ALT_BW_VALID_MASK) *max_valid = TRUE; else *max_valid = FALSE; return status; } /** * i40e_aq_configure_partition_bw * @hw: pointer to the hardware structure * @bw_data: Buffer holding valid pfs and bw limits * @cmd_details: pointer to command details * * Configure partitions guaranteed/max bw **/ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { enum i40e_status_code status; struct i40e_aq_desc desc; u16 bwd_size = sizeof(*bw_data); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_partition_bw); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); if (bwd_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(bwd_size); status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details); return status; } /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details * * Send message to PF driver using admin 
queue. By default, this message * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for * completion before returning. **/ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, enum i40e_virtchnl_ops v_opcode, enum i40e_status_code v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_asq_cmd_details details; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); desc.cookie_high = CPU_TO_LE32(v_opcode); desc.cookie_low = CPU_TO_LE32(v_retval); if (msglen) { desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(msglen); } if (!cmd_details) { i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM); details.async = TRUE; cmd_details = &details; } status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, msglen, cmd_details); return status; } /** * i40e_vf_parse_hw_config * @hw: pointer to the hardware structure * @msg: pointer to the virtual channel VF resource structure * * Given a VF resource message from the PF, populate the hw struct * with appropriate information. **/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, struct i40e_virtchnl_vf_resource *msg) { struct i40e_virtchnl_vsi_resource *vsi_res; int i; vsi_res = &msg->vsi_res[0]; hw->dev_caps.num_vsis = msg->num_vsis; hw->dev_caps.num_rx_qp = msg->num_queue_pairs; hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; hw->dev_caps.dcb = msg->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_L2; hw->dev_caps.fcoe = (msg->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0; hw->dev_caps.iwarp = (msg->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; for (i = 0; i < msg->num_vsis; i++) { if (vsi_res->vsi_type == I40E_VSI_SRIOV) { i40e_memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr, I40E_ETH_LENGTH_OF_ADDRESS, I40E_NONDMA_TO_NONDMA); i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr, I40E_ETH_LENGTH_OF_ADDRESS, I40E_NONDMA_TO_NONDMA); } vsi_res++; } } /** * i40e_vf_reset * @hw: pointer to the hardware structure * * Send a VF_RESET message to the PF. Does not wait for response from PF * as none will be forthcoming. Immediately after calling this function, * the admin queue should be shut down and (optionally) reinitialized. **/ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw) { return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, I40E_SUCCESS, NULL, 0, NULL); } Index: head/sys/dev/ixl/i40e_type.h =================================================================== --- head/sys/dev/ixl/i40e_type.h (revision 299550) +++ head/sys/dev/ixl/i40e_type.h (revision 299551) @@ -1,1532 +1,1532 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ #include "i40e_status.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" #include "i40e_devids.h" #define UNREFERENCED_XPARAMETER #define BIT(a) (1UL << (a)) #define BIT_ULL(a) (1ULL << (a)) #ifndef I40E_MASK /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) #endif #define I40E_MAX_PF 16 #define I40E_MAX_PF_VSI 64 #define I40E_MAX_PF_QP 128 #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 3 #define I40E_MAX_CHAINED_RX_BUFFERS 5 #define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 /* something less than 1 minute */ #define I40E_HEARTBEAT_TIMEOUT (HZ * 50) /* Max default timeout in ms, */ #define I40E_MAX_NVM_TIMEOUT 18000 /* Check whether address is multicast. */ #define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01)) /* Check whether an address is broadcast. */ #define I40E_IS_BROADCAST(address) \ ((((u8 *)(address))[0] == ((u8)0xff)) && \ (((u8 *)(address))[1] == ((u8)0xff))) /* Switch from ms to the 1usec global time (this is the GTIME resolution) */ #define I40E_MS_TO_GTIME(time) ((time) * 1000) /* forward declaration */ struct i40e_hw; typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); #define I40E_ETH_LENGTH_OF_ADDRESS 6 /* Data type manipulation macros. */ #define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) #define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) #define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) #define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF)) #define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) #define I40E_LO_BYTE(x) ((u8)((x) & 0xFF)) /* Number of Transmit Descriptors must be a multiple of 8. */ #define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8 /* Number of Receive Descriptors must be a multiple of 32 if * the number of descriptors is greater than 32. */ #define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32 #define I40E_DESC_UNUSED(R) \ ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) /* bitfields for Tx queue mapping in QTX_CTL */ #define I40E_QTX_CTL_VF_QUEUE 0x0 #define I40E_QTX_CTL_VM_QUEUE 0x1 #define I40E_QTX_CTL_PF_QUEUE 0x2 /* debug masks - set these bits in hw->debug_mask to control output */ enum i40e_debug_mask { I40E_DEBUG_INIT = 0x00000001, I40E_DEBUG_RELEASE = 0x00000002, I40E_DEBUG_LINK = 0x00000010, I40E_DEBUG_PHY = 0x00000020, I40E_DEBUG_HMC = 0x00000040, I40E_DEBUG_NVM = 0x00000080, I40E_DEBUG_LAN = 0x00000100, I40E_DEBUG_FLOW = 0x00000200, I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, I40E_DEBUG_FD = 0x00001000, I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, I40E_DEBUG_AQ_COMMAND = 0x06000000, I40E_DEBUG_AQ = 0x0F000000, I40E_DEBUG_USER = 0xF0000000, I40E_DEBUG_ALL = 0xFFFFFFFF }; /* PCI Bus Info */ #define I40E_PCI_LINK_STATUS 0xB2 #define I40E_PCI_LINK_WIDTH 0x3F0 #define I40E_PCI_LINK_WIDTH_1 0x10 #define I40E_PCI_LINK_WIDTH_2 0x20 #define I40E_PCI_LINK_WIDTH_4 0x40 #define I40E_PCI_LINK_WIDTH_8 0x80 #define I40E_PCI_LINK_SPEED 0xF #define I40E_PCI_LINK_SPEED_2500 0x1 #define I40E_PCI_LINK_SPEED_5000 0x2 #define I40E_PCI_LINK_SPEED_8000 0x3 /* Memory types */ enum i40e_memset_type { I40E_NONDMA_MEM = 0, I40E_DMA_MEM }; /* Memcpy types */ enum i40e_memcpy_type { I40E_NONDMA_TO_NONDMA = 0, I40E_NONDMA_TO_DMA, I40E_DMA_TO_DMA, I40E_DMA_TO_NONDMA }; -#define I40E_FW_API_VERSION_MINOR_X710 0x0004 +#define I40E_FW_API_VERSION_MINOR_X710 0x0005 /* These are structs for managing the hardware information and the operations. * The structures of function pointers are filled out at init time when we * know for sure exactly which hardware we're working with. This gives us the * flexibility of using the same main driver code but adapting to slightly * different hardware needs as new parts are developed. For this architecture, * the Firmware and AdminQ are intended to insulate the driver from most of the * future changes, but these structures will also do part of the job. 
*/ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_GENERIC, }; enum i40e_media_type { I40E_MEDIA_TYPE_UNKNOWN = 0, I40E_MEDIA_TYPE_FIBER, I40E_MEDIA_TYPE_BASET, I40E_MEDIA_TYPE_BACKPLANE, I40E_MEDIA_TYPE_CX4, I40E_MEDIA_TYPE_DA, I40E_MEDIA_TYPE_VIRTUAL }; enum i40e_fc_mode { I40E_FC_NONE = 0, I40E_FC_RX_PAUSE, I40E_FC_TX_PAUSE, I40E_FC_FULL, I40E_FC_PFC, I40E_FC_DEFAULT }; enum i40e_set_fc_aq_failures { I40E_SET_FC_AQ_FAIL_NONE = 0, I40E_SET_FC_AQ_FAIL_GET = 1, I40E_SET_FC_AQ_FAIL_SET = 2, I40E_SET_FC_AQ_FAIL_UPDATE = 4, I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 }; enum i40e_vsi_type { I40E_VSI_MAIN = 0, I40E_VSI_VMDQ1 = 1, I40E_VSI_VMDQ2 = 2, I40E_VSI_CTRL = 3, I40E_VSI_FCOE = 4, I40E_VSI_MIRROR = 5, I40E_VSI_SRIOV = 6, I40E_VSI_FDIR = 7, I40E_VSI_TYPE_UNKNOWN }; enum i40e_queue_type { I40E_QUEUE_TYPE_RX = 0, I40E_QUEUE_TYPE_TX, I40E_QUEUE_TYPE_PE_CEQ, I40E_QUEUE_TYPE_UNKNOWN }; struct i40e_link_status { enum i40e_aq_phy_type phy_type; enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; u8 ext_info; u8 loopback; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; u8 requested_speeds; u8 module_type[3]; /* 1st byte: module identifier */ #define I40E_MODULE_TYPE_SFP 0x03 #define I40E_MODULE_TYPE_QSFP 0x0D /* 2nd byte: ethernet compliance codes for 10/40G */ #define I40E_MODULE_TYPE_40G_ACTIVE 0x01 #define I40E_MODULE_TYPE_40G_LR4 0x02 #define I40E_MODULE_TYPE_40G_SR4 0x04 #define I40E_MODULE_TYPE_40G_CR4 0x08 #define I40E_MODULE_TYPE_10G_BASE_SR 0x10 #define I40E_MODULE_TYPE_10G_BASE_LR 0x20 #define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 #define I40E_MODULE_TYPE_10G_BASE_ER 0x80 /* 3rd byte: ethernet compliance codes for 1G */ #define I40E_MODULE_TYPE_1000BASE_SX 0x01 #define I40E_MODULE_TYPE_1000BASE_LX 0x02 #define I40E_MODULE_TYPE_1000BASE_CX 0x04 #define I40E_MODULE_TYPE_1000BASE_T 0x08 }; enum i40e_aq_capabilities_phy_type { I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR), I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX), I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), I40E_CAP_PHY_TYPE_1000BASE_LX = 
BIT(I40E_PHY_TYPE_1000BASE_LX), I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; bool get_link_info; enum i40e_media_type media_type; /* all the phy types the NVM is capable of */ u32 phy_types; }; #define I40E_HW_CAP_MAX_GPIO 30 #define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0 #define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1 /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { u32 switch_mode; #define I40E_NVM_IMAGE_TYPE_EVB 0x0 #define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 #define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 u32 management_mode; u32 npar_enable; u32 os2bmc; u32 valid_functions; bool sr_iov_1_1; bool vmdq; bool evb_802_1_qbg; /* Edge Virtual Bridging */ bool evb_802_1_qbh; /* Bridge Port Extension */ bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ bool flex10_enable; bool flex10_capable; u32 flex10_mode; #define I40E_FLEX10_MODE_UNKNOWN 0x0 #define I40E_FLEX10_MODE_DCC 0x1 #define I40E_FLEX10_MODE_DCI 0x2 u32 flex10_status; #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 bool mgmt_cem; bool ieee_1588; bool iwarp; bool fd; u32 fd_filters_guaranteed; u32 fd_filters_best_effort; bool rss; u32 rss_table_size; u32 rss_table_entry_width; bool led[I40E_HW_CAP_MAX_GPIO]; bool sdp[I40E_HW_CAP_MAX_GPIO]; u32 nvm_image_type; u32 num_flow_director_filters; u32 num_vfs; u32 vf_base_id; u32 num_vsis; u32 num_rx_qp; u32 num_tx_qp; u32 base_queue; u32 num_msix_vectors; u32 num_msix_vectors_vf; u32 led_pin_num; u32 sdp_pin_num; u32 mdio_port_num; u32 mdio_port_mode; u8 rx_buf_chain_len; u32 enabled_tcmap; u32 maxtc; u64 wr_csr_prot; }; struct i40e_mac_info { enum i40e_mac_type type; u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS]; u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS]; u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS]; u16 max_fcoeq; }; enum i40e_aq_resources_ids { I40E_NVM_RESOURCE_ID = 1 }; enum i40e_aq_resource_access_type { I40E_RESOURCE_READ = 1, I40E_RESOURCE_WRITE }; struct i40e_nvm_info { u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ enum i40e_nvmupd_cmd { I40E_NVMUPD_INVALID, I40E_NVMUPD_READ_CON, I40E_NVMUPD_READ_SNT, I40E_NVMUPD_READ_LCB, I40E_NVMUPD_READ_SA, I40E_NVMUPD_WRITE_ERA, I40E_NVMUPD_WRITE_CON, I40E_NVMUPD_WRITE_SNT, I40E_NVMUPD_WRITE_LCB, I40E_NVMUPD_WRITE_SA, I40E_NVMUPD_CSUM_CON, I40E_NVMUPD_CSUM_SA, I40E_NVMUPD_CSUM_LCB, I40E_NVMUPD_STATUS, I40E_NVMUPD_EXEC_AQ, I40E_NVMUPD_GET_AQ_RESULT, }; enum i40e_nvmupd_state { I40E_NVMUPD_STATE_INIT, I40E_NVMUPD_STATE_READING, I40E_NVMUPD_STATE_WRITING, I40E_NVMUPD_STATE_INIT_WAIT, I40E_NVMUPD_STATE_WRITE_WAIT, }; /* nvm_access definition and its masks/shifts need to be accessible to * application, core driver, and shared code. Where is the right file? 
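 *
 * Illustrative layout of the config word (based on the masks below): the
 * module pointer sits in the low byte and the transaction type in the
 * next nibble, e.g.
 *     config = (module & I40E_NVM_MOD_PNT_MASK) |
 *              (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT);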
*/ #define I40E_NVM_READ 0xB #define I40E_NVM_WRITE 0xC #define I40E_NVM_MOD_PNT_MASK 0xFF #define I40E_NVM_TRANS_SHIFT 8 #define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) #define I40E_NVM_CON 0x0 #define I40E_NVM_SNT 0x1 #define I40E_NVM_LCB 0x2 #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) #define I40E_NVM_ERA 0x4 #define I40E_NVM_CSUM 0x8 #define I40E_NVM_EXEC 0xf #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT) #define I40E_NVMUPD_MAX_DATA 4096 #define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ struct i40e_nvm_access { u32 command; u32 config; u32 offset; /* in bytes */ u32 data_size; /* in bytes */ u8 data[1]; }; /* PCI bus types */ enum i40e_bus_type { i40e_bus_type_unknown = 0, i40e_bus_type_pci, i40e_bus_type_pcix, i40e_bus_type_pci_express, i40e_bus_type_reserved }; /* PCI bus speeds */ enum i40e_bus_speed { i40e_bus_speed_unknown = 0, i40e_bus_speed_33 = 33, i40e_bus_speed_66 = 66, i40e_bus_speed_100 = 100, i40e_bus_speed_120 = 120, i40e_bus_speed_133 = 133, i40e_bus_speed_2500 = 2500, i40e_bus_speed_5000 = 5000, i40e_bus_speed_8000 = 8000, i40e_bus_speed_reserved }; /* PCI bus widths */ enum i40e_bus_width { i40e_bus_width_unknown = 0, i40e_bus_width_pcie_x1 = 1, i40e_bus_width_pcie_x2 = 2, i40e_bus_width_pcie_x4 = 4, i40e_bus_width_pcie_x8 = 8, i40e_bus_width_32 = 32, i40e_bus_width_64 = 64, i40e_bus_width_reserved }; /* Bus parameters */ struct i40e_bus_info { enum i40e_bus_speed speed; enum i40e_bus_width width; enum i40e_bus_type type; u16 func; u16 device; u16 lan_id; }; /* Flow control (FC) parameters */ struct i40e_fc_info { enum i40e_fc_mode current_mode; /* FC mode in effect */ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ }; #define I40E_MAX_TRAFFIC_CLASS 8 #define I40E_MAX_USER_PRIORITY 8 #define I40E_DCBX_MAX_APPS 32 #define I40E_LLDPDU_SIZE 1500 #define I40E_TLV_STATUS_OPER 0x1 #define I40E_TLV_STATUS_SYNC 0x2 #define I40E_TLV_STATUS_ERR 0x4 #define I40E_CEE_OPER_MAX_APPS 3 #define I40E_APP_PROTOID_FCOE 0x8906 #define I40E_APP_PROTOID_ISCSI 0x0cbc #define I40E_APP_PROTOID_FIP 0x8914 #define I40E_APP_SEL_ETHTYPE 0x1 #define I40E_APP_SEL_TCPIP 0x2 #define I40E_CEE_APP_SEL_ETHTYPE 0x0 #define I40E_CEE_APP_SEL_TCPIP 0x1 /* CEE or IEEE 802.1Qaz ETS Configuration data */ struct i40e_dcb_ets_config { u8 willing; u8 cbs; u8 maxtcs; u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; }; /* CEE or IEEE 802.1Qaz PFC Configuration data */ struct i40e_dcb_pfc_config { u8 willing; u8 mbc; u8 pfccap; u8 pfcenable; }; /* CEE or IEEE 802.1Qaz Application Priority data */ struct i40e_dcb_app_priority_table { u8 priority; u8 selector; u16 protocolid; }; struct i40e_dcbx_config { u8 dcbx_mode; #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 u8 app_mode; #define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; struct i40e_dcb_ets_config etsrec; struct i40e_dcb_pfc_config pfc; struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; }; /* Port hardware description */ struct i40e_hw { u8 *hw_addr; void *back; /* subsystem structs */ struct i40e_phy_info phy; struct i40e_mac_info mac; struct i40e_bus_info bus; struct i40e_nvm_info nvm; struct i40e_fc_info fc; /* pci info */ u16 device_id; u16 vendor_id; u16 subsystem_device_id; u16 subsystem_vendor_id; u8 revision_id; u8 port; bool adapter_stopped; /* capabilities for entire device and PCI func */ struct 
i40e_hw_capabilities dev_caps; struct i40e_hw_capabilities func_caps; /* Flow Director shared filter space */ u16 fdir_shared_filter_count; /* device profile info */ u8 pf_id; u16 main_vsi_seid; /* for multi-function MACs */ u16 partition_id; u16 num_partitions; u16 num_ports; /* Closest numa node to the device */ u16 numa_node; /* Admin Queue info */ struct i40e_adminq_info aq; /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ /* LLDP/DCBX Status */ u16 dcbx_status; /* DCBX info */ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) u64 flags; /* debug mask */ u32 debug_mask; char err_str[16]; }; static INLINE bool i40e_is_vf(struct i40e_hw *hw) { return hw->mac.type == I40E_MAC_VF; } struct i40e_driver_version { u8 major_version; u8 minor_version; u8 build_version; u8 subbuild_version; u8 driver_string[32]; }; /* RX Descriptors */ union i40e_16byte_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { struct { union { __le16 mirroring_status; __le16 fcoe_ctx_id; } mirr_fcoe; __le16 l2tag1; } lo_dword; union { __le32 rss; /* RSS Hash */ __le32 fd_id; /* Flow director filter id */ __le32 fcoe_param; /* FCoE DDP Context id */ } hi_dword; } qword0; struct { /* ext status/error/pktype/length */ __le64 status_error_len; } qword1; } wb; /* writeback */ }; union i40e_32byte_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ /* bit 0 of hdr_buffer_addr is DD bit */ __le64 rsvd1; __le64 rsvd2; } read; struct { struct { struct { union { __le16 mirroring_status; __le16 fcoe_ctx_id; } mirr_fcoe; __le16 l2tag1; } lo_dword; union { __le32 rss; /* RSS Hash */ __le32 fcoe_param; /* FCoE DDP Context id */ /* Flow director filter id in case of * Programming status desc WB */ __le32 fd_id; } hi_dword; } qword0; struct { /* status/error/pktype/length */ __le64 status_error_len; } qword1; struct { __le16 ext_status; /* extended status */ __le16 rsvd; __le16 l2tag2_1; __le16 l2tag2_2; } qword2; struct { union { __le32 flex_bytes_lo; __le32 pe_status; } lo_dword; union { __le32 flex_bytes_hi; __le32 fd_id; } hi_dword; } qword3; } wb; /* writeback */ }; #define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8 #define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \ I40E_RXD_QW0_MIRROR_STATUS_SHIFT) #define I40E_RXD_QW0_FCOEINDX_SHIFT 0 #define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \ I40E_RXD_QW0_FCOEINDX_SHIFT) enum i40e_rx_desc_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_STATUS_DD_SHIFT = 0, I40E_RX_DESC_STATUS_EOF_SHIFT = 1, I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! 
*/ }; #define I40E_RXD_QW1_STATUS_SHIFT 0 #define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \ I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT #define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) #define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST #define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \ I40E_RXD_QW1_STATUS_UMBCAST_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */ I40E_RX_DESC_FLTSTAT_RSV = 2, I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, }; #define I40E_RXD_PACKET_TYPE_UNICAST 0 #define I40E_RXD_PACKET_TYPE_MULTICAST 1 #define I40E_RXD_PACKET_TYPE_BROADCAST 2 #define I40E_RXD_PACKET_TYPE_MIRRORED 3 #define I40E_RXD_QW1_ERROR_SHIFT 19 #define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) enum i40e_rx_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_ERROR_RXE_SHIFT = 0, I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, I40E_RX_DESC_ERROR_HBO_SHIFT = 2, I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ I40E_RX_DESC_ERROR_IPE_SHIFT = 3, I40E_RX_DESC_ERROR_L4E_SHIFT = 4, I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 }; enum i40e_rx_desc_error_l3l4e_fcoe_masks { I40E_RX_DESC_ERROR_L3L4E_NONE = 0, I40E_RX_DESC_ERROR_L3L4E_PROT = 1, I40E_RX_DESC_ERROR_L3L4E_FC = 2, I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 }; #define I40E_RXD_QW1_PTYPE_SHIFT 30 #define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) /* Packet type non-ip values */ enum i40e_rx_l2_ptype { I40E_RX_PTYPE_L2_RESERVED = 0, I40E_RX_PTYPE_L2_MAC_PAY2 = 1, I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, I40E_RX_PTYPE_L2_FIP_PAY2 = 3, I40E_RX_PTYPE_L2_OUI_PAY2 = 4, I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, I40E_RX_PTYPE_L2_ECP_PAY2 = 7, I40E_RX_PTYPE_L2_EVB_PAY2 = 8, I40E_RX_PTYPE_L2_QCN_PAY2 = 9, I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, I40E_RX_PTYPE_L2_ARP = 11, I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 }; struct i40e_rx_ptype_decoded { u32 ptype:8; u32 known:1; u32 outer_ip:1; u32 outer_ip_ver:1; u32 outer_frag:1; u32 tunnel_type:3; u32 tunnel_end_prot:2; u32 tunnel_end_frag:1; u32 inner_prot:4; u32 payload_layer:3; }; enum i40e_rx_ptype_outer_ip { I40E_RX_PTYPE_OUTER_L2 = 0, I40E_RX_PTYPE_OUTER_IP = 1 }; enum i40e_rx_ptype_outer_ip_ver { I40E_RX_PTYPE_OUTER_NONE = 0, I40E_RX_PTYPE_OUTER_IPV4 = 0, I40E_RX_PTYPE_OUTER_IPV6 = 1 }; enum i40e_rx_ptype_outer_fragmented { I40E_RX_PTYPE_NOT_FRAG = 0, I40E_RX_PTYPE_FRAG = 1 }; enum i40e_rx_ptype_tunnel_type { I40E_RX_PTYPE_TUNNEL_NONE = 0, I40E_RX_PTYPE_TUNNEL_IP_IP = 1, I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, 
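/*
 * As an illustration of how the qword1 fields above are consumed: the
 * writeback descriptor packs the status, error and packet-type fields
 * behind the I40E_RXD_QW1_* shift/mask pairs.  Assuming "rxd" points at a
 * completed union i40e_32byte_rx_desc (the variable name exists only for
 * this sketch), the fields could be pulled apart roughly like this:
 *
 *	u64 qword  = le64toh(rxd->wb.qword1.status_error_len);
 *	u32 status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 *	    I40E_RXD_QW1_STATUS_SHIFT;
 *	u32 error  = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 *	    I40E_RXD_QW1_ERROR_SHIFT;
 *	u8 ptype   = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 *	    I40E_RXD_QW1_PTYPE_SHIFT;
 *
 *	if (status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)) {
 *		// Descriptor done; EOF marks the last buffer of the packet.
 *		bool eof = (status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)) != 0;
 *	}
 *
 * le64toh() stands in for whatever little-endian conversion the
 * surrounding code uses; the hardware writes descriptors back in
 * little-endian byte order.
 */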
I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, }; enum i40e_rx_ptype_tunnel_end_prot { I40E_RX_PTYPE_TUNNEL_END_NONE = 0, I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, }; enum i40e_rx_ptype_inner_prot { I40E_RX_PTYPE_INNER_PROT_NONE = 0, I40E_RX_PTYPE_INNER_PROT_UDP = 1, I40E_RX_PTYPE_INNER_PROT_TCP = 2, I40E_RX_PTYPE_INNER_PROT_SCTP = 3, I40E_RX_PTYPE_INNER_PROT_ICMP = 4, I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 }; enum i40e_rx_ptype_payload_layer { I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, }; #define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF #define I40E_RX_PTYPE_SHIFT 56 #define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 #define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) #define I40E_RXD_QW1_NEXTP_SHIFT 38 #define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT) #define I40E_RXD_QW2_EXT_STATUS_SHIFT 0 #define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \ I40E_RXD_QW2_EXT_STATUS_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, }; #define I40E_RXD_QW2_L2TAG2_SHIFT 0 #define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT) #define I40E_RXD_QW2_L2TAG3_SHIFT 16 #define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT) enum i40e_rx_desc_pe_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 }; #define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 #define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) #define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0 #define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \ I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT) #define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 #define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) enum i40e_rx_prog_status_desc_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ }; enum i40e_rx_prog_status_desc_prog_id_masks { I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, }; enum i40e_rx_prog_status_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, 
I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 }; #define I40E_TWO_BIT_MASK 0x3 #define I40E_THREE_BIT_MASK 0x7 #define I40E_FOUR_BIT_MASK 0xF #define I40E_EIGHTEEN_BIT_MASK 0x3FFFF /* TX Descriptor */ struct i40e_tx_desc { __le64 buffer_addr; /* Address of descriptor's data buf */ __le64 cmd_type_offset_bsz; }; #define I40E_TXD_QW1_DTYPE_SHIFT 0 #define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) enum i40e_tx_desc_dtype_value { I40E_TX_DESC_DTYPE_DATA = 0x0, I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ I40E_TX_DESC_DTYPE_CONTEXT = 0x1, I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, I40E_TX_DESC_DTYPE_DESC_DONE = 0xF }; #define I40E_TXD_QW1_CMD_SHIFT 4 #define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) enum i40e_tx_desc_cmd_bits { I40E_TX_DESC_CMD_EOP = 0x0001, I40E_TX_DESC_CMD_RS = 0x0002, I40E_TX_DESC_CMD_ICRC = 0x0004, I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, I40E_TX_DESC_CMD_DUMMY = 0x0010, I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ I40E_TX_DESC_CMD_FCOET = 0x0080, I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ }; #define I40E_TXD_QW1_OFFSET_SHIFT 16 #define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ I40E_TXD_QW1_OFFSET_SHIFT) enum i40e_tx_desc_length_fields { /* Note: These are predefined bit offsets */ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ }; #define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) #define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) #define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) #define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) #define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 #define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ I40E_TXD_QW1_TX_BUF_SZ_SHIFT) #define I40E_TXD_QW1_L2TAG1_SHIFT 48 #define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) /* Context descriptors */ struct i40e_tx_context_desc { __le32 tunneling_params; __le16 l2tag2; __le16 rsvd; __le64 type_cmd_tso_mss; }; #define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 #define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) #define I40E_TXD_CTX_QW1_CMD_SHIFT 4 #define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) enum i40e_tx_ctx_desc_cmd_bits { I40E_TX_CTX_DESC_TSO = 0x01, I40E_TX_CTX_DESC_TSYN = 0x02, I40E_TX_CTX_DESC_IL2TAG2 = 0x04, I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, I40E_TX_CTX_DESC_SWPE = 0x40 }; #define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 #define 
I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) #define I40E_TXD_CTX_QW1_MSS_SHIFT 50 #define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ I40E_TXD_CTX_QW1_MSS_SHIFT) #define I40E_TXD_CTX_QW1_VSI_SHIFT 50 #define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) #define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 #define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ I40E_TXD_CTX_QW0_EXT_IP_SHIFT) enum i40e_tx_ctx_desc_eipt_offload { I40E_TX_CTX_EXT_IP_NONE = 0x0, I40E_TX_CTX_EXT_IP_IPV6 = 0x1, I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, I40E_TX_CTX_EXT_IP_IPV4 = 0x3 }; #define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 #define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 #define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK #define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 #define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ I40E_TXD_CTX_QW0_NATLEN_SHIFT) #define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 #define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ I40E_TXD_CTX_QW0_DECTTL_SHIFT) struct i40e_nop_desc { __le64 rsvd; __le64 dtype_cmd; }; #define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0 #define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT) #define I40E_TXD_NOP_QW1_CMD_SHIFT 4 #define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT) enum i40e_tx_nop_desc_cmd_bits { /* Note: These are predefined bit offsets */ I40E_TX_NOP_DESC_EOP_SHIFT = 0, I40E_TX_NOP_DESC_RS_SHIFT = 1, I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */ }; struct i40e_filter_program_desc { __le32 qindex_flex_ptype_vsi; __le32 rsvd; __le32 dtype_cmd_cntindex; __le32 fd_id; }; #define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 #define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ I40E_TXD_FLTR_QW0_QINDEX_SHIFT) #define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 #define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) #define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 #define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) /* Packet Classifier Types for filters */ enum i40e_filter_pctype { /* Note: Values 0-30 are reserved for future use */ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, /* Note: Value 32 is reserved for future use */ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, /* Note: Values 37-40 are reserved for future use */ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, /* Note: Value 47 is reserved for future use */ I40E_FILTER_PCTYPE_FCOE_OX = 48, I40E_FILTER_PCTYPE_FCOE_RX = 49, I40E_FILTER_PCTYPE_FCOE_OTHER = 50, /* Note: Values 51-62 are reserved for future use */ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, }; enum i40e_filter_program_desc_dest { I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, }; enum i40e_filter_program_desc_fd_status { I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, 
I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 #define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0 #define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) enum i40e_filter_program_desc_pcmd { I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, }; #define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) enum i40e_filter_type { I40E_FLOW_DIRECTOR_FLTR = 0, I40E_PE_QUAD_HASH_FLTR = 1, I40E_ETHERTYPE_FLTR, I40E_FCOE_CTX_FLTR, I40E_MAC_VLAN_FLTR, I40E_HASH_FLTR }; struct i40e_vsi_context { u16 seid; u16 uplink_seid; u16 vsi_number; u16 vsis_allocated; u16 vsis_unallocated; u16 flags; u8 pf_num; u8 vf_num; u8 connection_type; struct i40e_aqc_vsi_properties_data info; }; struct i40e_veb_context { u16 seid; u16 uplink_seid; u16 veb_number; u16 vebs_allocated; u16 vebs_unallocated; u16 flags; struct i40e_aqc_get_veb_parameters_completion info; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ struct i40e_eth_stats { u64 rx_bytes; /* gorc */ u64 rx_unicast; /* uprc */ u64 rx_multicast; /* mprc */ u64 rx_broadcast; /* bprc */ u64 rx_discards; /* rdpc */ u64 rx_unknown_protocol; /* rupp */ u64 tx_bytes; /* gotc */ u64 tx_unicast; /* uptc */ u64 tx_multicast; /* mptc */ u64 tx_broadcast; /* bptc */ u64 tx_discards; /* tdpc */ u64 tx_errors; /* tepc */ }; /* Statistics collected per VEB per TC */ struct i40e_veb_tc_stats { u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; }; /* Statistics collected by the MAC */ struct i40e_hw_port_stats { /* eth stats collected by the port */ struct i40e_eth_stats eth; /* additional port specific stats */ u64 tx_dropped_link_down; /* tdold */ u64 crc_errors; /* crcerrs */ u64 illegal_bytes; /* illerrc */ u64 error_bytes; /* errbc */ u64 mac_local_faults; /* mlfc */ u64 mac_remote_faults; /* mrfc */ u64 rx_length_errors; /* rlec */ u64 link_xon_rx; /* lxonrxc */ u64 link_xoff_rx; /* lxoffrxc */ u64 priority_xon_rx[8]; /* pxonrxc[8] */ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ u64 link_xon_tx; /* lxontxc */ u64 link_xoff_tx; /* lxofftxc */ u64 priority_xon_tx[8]; /* pxontxc[8] */ u64 priority_xoff_tx[8]; /* pxofftxc[8] */ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ u64 rx_size_64; /* prc64 */ u64 rx_size_127; /* prc127 */ u64 rx_size_255; /* prc255 */ u64 rx_size_511; /* prc511 */ u64 rx_size_1023; /* prc1023 */ u64 
rx_size_1522; /* prc1522 */ u64 rx_size_big; /* prc9522 */ u64 rx_undersize; /* ruc */ u64 rx_fragments; /* rfc */ u64 rx_oversize; /* roc */ u64 rx_jabber; /* rjc */ u64 tx_size_64; /* ptc64 */ u64 tx_size_127; /* ptc127 */ u64 tx_size_255; /* ptc255 */ u64 tx_size_511; /* ptc511 */ u64 tx_size_1023; /* ptc1023 */ u64 tx_size_1522; /* ptc1522 */ u64 tx_size_big; /* ptc9522 */ u64 mac_short_packet_dropped; /* mspdc */ u64 checksum_error; /* xec */ /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; u32 fd_atr_status; u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; u64 tx_lpi_count; /* etlpic */ u64 rx_lpi_count; /* erlpic */ }; /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03 #define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04 #define I40E_SR_OPTION_ROM_PTR 0x05 #define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06 #define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07 #define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08 #define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09 #define I40E_SR_RO_PCIE_LCB_PTR 0x0A #define I40E_SR_EMP_IMAGE_PTR 0x0B #define I40E_SR_PE_IMAGE_PTR 0x0C #define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D #define I40E_SR_MNG_CONFIG_PTR 0x0E #define I40E_SR_EMP_MODULE_PTR 0x0F #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 #define I40E_SR_BOOT_CONFIG_PTR 0x17 #define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28 #define I40E_SR_NVM_MAP_VERSION 0x29 #define I40E_SR_NVM_IMAGE_VERSION 0x2A #define I40E_SR_NVM_STRUCTURE_VERSION 0x2B #define I40E_SR_NVM_EETRACK_LO 0x2D #define I40E_SR_NVM_EETRACK_HI 0x2E #define I40E_SR_VPD_PTR 0x2F #define I40E_SR_PXE_SETUP_PTR 0x30 #define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31 #define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34 #define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35 #define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37 #define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38 #define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A #define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B #define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E #define I40E_SR_SW_CHECKSUM_WORD 0x3F #define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40 #define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42 #define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44 #define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46 #define I40E_SR_EMP_SR_SETTINGS_PTR 0x48 #define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49 #define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D #define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ #define I40E_SR_VPD_MODULE_MAX_SIZE 1024 #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) /* Shadow RAM related */ #define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 #define I40E_SR_BUF_ALIGNMENT 4096 #define I40E_SR_WORDS_IN_1KB 512 /* Checksum should be calculated such that after adding all the words, * including the checksum word itself, the sum should be 0xBABA. 
*/ #define I40E_SR_SW_CHECKSUM_BASE 0xBABA #define I40E_SRRD_SRCTL_ATTEMPTS 100000 enum i40e_switch_element_types { I40E_SWITCH_ELEMENT_TYPE_MAC = 1, I40E_SWITCH_ELEMENT_TYPE_PF = 2, I40E_SWITCH_ELEMENT_TYPE_VF = 3, I40E_SWITCH_ELEMENT_TYPE_EMP = 4, I40E_SWITCH_ELEMENT_TYPE_BMC = 6, I40E_SWITCH_ELEMENT_TYPE_PE = 16, I40E_SWITCH_ELEMENT_TYPE_VEB = 17, I40E_SWITCH_ELEMENT_TYPE_PA = 18, I40E_SWITCH_ELEMENT_TYPE_VSI = 19, }; /* Supported EtherType filters */ enum i40e_ether_type_index { I40E_ETHER_TYPE_1588 = 0, I40E_ETHER_TYPE_FIP = 1, I40E_ETHER_TYPE_OUI_EXTENDED = 2, I40E_ETHER_TYPE_MAC_CONTROL = 3, I40E_ETHER_TYPE_LLDP = 4, I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, I40E_ETHER_TYPE_QCN_CNM = 7, I40E_ETHER_TYPE_8021X = 8, I40E_ETHER_TYPE_ARP = 9, I40E_ETHER_TYPE_RSV1 = 10, I40E_ETHER_TYPE_RSV2 = 11, }; /* Filter context base size is 1K */ #define I40E_HASH_FILTER_BASE_SIZE 1024 /* Supported Hash filter values */ enum i40e_hash_filter_size { I40E_HASH_FILTER_SIZE_1K = 0, I40E_HASH_FILTER_SIZE_2K = 1, I40E_HASH_FILTER_SIZE_4K = 2, I40E_HASH_FILTER_SIZE_8K = 3, I40E_HASH_FILTER_SIZE_16K = 4, I40E_HASH_FILTER_SIZE_32K = 5, I40E_HASH_FILTER_SIZE_64K = 6, I40E_HASH_FILTER_SIZE_128K = 7, I40E_HASH_FILTER_SIZE_256K = 8, I40E_HASH_FILTER_SIZE_512K = 9, I40E_HASH_FILTER_SIZE_1M = 10, }; /* DMA context base size is 0.5K */ #define I40E_DMA_CNTX_BASE_SIZE 512 /* Supported DMA context values */ enum i40e_dma_cntx_size { I40E_DMA_CNTX_SIZE_512 = 0, I40E_DMA_CNTX_SIZE_1K = 1, I40E_DMA_CNTX_SIZE_2K = 2, I40E_DMA_CNTX_SIZE_4K = 3, I40E_DMA_CNTX_SIZE_8K = 4, I40E_DMA_CNTX_SIZE_16K = 5, I40E_DMA_CNTX_SIZE_32K = 6, I40E_DMA_CNTX_SIZE_64K = 7, I40E_DMA_CNTX_SIZE_128K = 8, I40E_DMA_CNTX_SIZE_256K = 9, }; /* Supported Hash look up table (LUT) sizes */ enum i40e_hash_lut_size { I40E_HASH_LUT_SIZE_128 = 0, I40E_HASH_LUT_SIZE_512 = 1, }; /* Structure to hold a per PF filter control settings */ struct i40e_filter_control_settings { /* number of PE Quad Hash filter buckets */ enum i40e_hash_filter_size pe_filt_num; /* number of PE Quad Hash contexts */ enum i40e_dma_cntx_size pe_cntx_num; /* number of FCoE filter buckets */ enum i40e_hash_filter_size fcoe_filt_num; /* number of FCoE DDP contexts */ enum i40e_dma_cntx_size fcoe_cntx_num; /* size of the Hash LUT */ enum i40e_hash_lut_size hash_lut_size; /* enable FDIR filters for PF and its VFs */ bool enable_fdir; /* enable Ethertype filters for PF and its VFs */ bool enable_ethtype; /* enable MAC/VLAN filters for PF and its VFs */ bool enable_macvlan; }; /* Structure to hold device level control filter counts */ struct i40e_control_filter_stats { u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ u16 etype_used; /* Used perfect EtherType filters */ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ u16 etype_free; /* Un-used perfect EtherType filters */ }; enum i40e_reset_type { I40E_RESET_POR = 0, I40E_RESET_CORER = 1, I40E_RESET_GLOBR = 2, I40E_RESET_EMPR = 3, }; /* IEEE 802.1AB LLDP Agent Variables from NVM */ #define I40E_NVM_LLDP_CFG_PTR 0xD struct i40e_lldp_variables { u16 length; u16 adminstatus; u16 msgfasttx; u16 msgtxinterval; u16 txparams; u16 timers; u16 crc8; }; /* Offsets into Alternate Ram */ #define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ #define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ #define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ #define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ #define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ 
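/*
 * Illustration: the alternate RAM is laid out as one block of
 * I40E_ALT_STRUCT_DWORDS_PER_PF dwords per PF, so the address of any
 * per-PF field is the PF's block base plus the field offset.  For
 * example, the dword holding a PF's minimum bandwidth would sit at:
 *
 *	u32 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
 *	    (hw->pf_id * I40E_ALT_STRUCT_DWORDS_PER_PF) +
 *	    I40E_ALT_STRUCT_MIN_BW_OFFSET;
 *
 * The dword itself is fetched through an alternate-structure admin queue
 * read; the I40E_ALT_BW_* masks that follow then pick out the bandwidth
 * value, the relative/absolute flag and the valid bit.
 */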
#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ /* Alternate Ram Bandwidth Masks */ #define I40E_ALT_BW_VALUE_MASK 0xFF #define I40E_ALT_BW_RELATIVE_MASK 0x40000000 #define I40E_ALT_BW_VALID_MASK 0x80000000 /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 #endif /* _I40E_TYPE_H_ */ Index: head/sys/dev/ixl/if_ixl.c =================================================================== --- head/sys/dev/ixl/if_ixl.c (revision 299550) +++ head/sys/dev/ixl/if_ixl.c (revision 299551) @@ -1,7098 +1,7098 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef IXL_STANDALONE_BUILD #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #endif #include "ixl.h" #include "ixl_pf.h" #ifdef RSS #include #endif /********************************************************************* * Driver version *********************************************************************/ -char ixl_driver_version[] = "1.4.12-k"; +char ixl_driver_version[] = "1.4.13-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixl_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static ixl_vendor_info_t ixl_vendor_info_array[] = { {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings *********************************************************************/ static char *ixl_strings[] = { "Intel(R) Ethernet Connection XL710 Driver" }; /********************************************************************* * Function prototypes *********************************************************************/ static int ixl_probe(device_t); static int ixl_attach(device_t); static int ixl_detach(device_t); static int ixl_shutdown(device_t); static int ixl_get_hw_capabilities(struct ixl_pf *); static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); static int ixl_ioctl(struct ifnet *, u_long, caddr_t); static void ixl_init(void *); static void ixl_init_locked(struct ixl_pf *); static void ixl_stop(struct ixl_pf *); static void ixl_stop_locked(struct ixl_pf *); static void ixl_media_status(struct ifnet *, struct ifmediareq *); static int ixl_media_change(struct ifnet *); static void ixl_update_link_status(struct ixl_pf *); static int ixl_allocate_pci_resources(struct ixl_pf *); static u16 ixl_get_bus_info(struct i40e_hw *, device_t); static int ixl_setup_stations(struct ixl_pf *); static int ixl_switch_config(struct ixl_pf *); static int ixl_initialize_vsi(struct ixl_vsi *); static int ixl_assign_vsi_msix(struct ixl_pf *); static int ixl_assign_vsi_legacy(struct ixl_pf *); static int ixl_init_msix(struct ixl_pf *); static void ixl_configure_msix(struct ixl_pf *); static void ixl_configure_itr(struct ixl_pf *); static void ixl_configure_legacy(struct ixl_pf *); static void ixl_init_taskqueues(struct ixl_pf *); static void ixl_free_taskqueues(struct ixl_pf *); static void ixl_free_interrupt_resources(struct ixl_pf *); static void ixl_free_pci_resources(struct ixl_pf *); static void ixl_local_timer(void *); static int ixl_setup_interface(device_t, struct ixl_vsi *); static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *); static void ixl_config_rss(struct ixl_vsi *); static void ixl_set_queue_rx_itr(struct ixl_queue *); static void ixl_set_queue_tx_itr(struct ixl_queue *); static int 
ixl_set_advertised_speeds(struct ixl_pf *, int); static int ixl_enable_rings(struct ixl_vsi *); static int ixl_disable_rings(struct ixl_vsi *); static void ixl_enable_intr(struct ixl_vsi *); static void ixl_disable_intr(struct ixl_vsi *); static void ixl_disable_rings_intr(struct ixl_vsi *); static void ixl_enable_adminq(struct i40e_hw *); static void ixl_disable_adminq(struct i40e_hw *); static void ixl_enable_queue(struct i40e_hw *, int); static void ixl_disable_queue(struct i40e_hw *, int); static void ixl_enable_legacy(struct i40e_hw *); static void ixl_disable_legacy(struct i40e_hw *); static void ixl_set_promisc(struct ixl_vsi *); static void ixl_add_multi(struct ixl_vsi *); static void ixl_del_multi(struct ixl_vsi *); static void ixl_register_vlan(void *, struct ifnet *, u16); static void ixl_unregister_vlan(void *, struct ifnet *, u16); static void ixl_setup_vlan_filters(struct ixl_vsi *); static void ixl_init_filters(struct ixl_vsi *); static void ixl_reconfigure_filters(struct ixl_vsi *vsi); static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan); static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan); static void ixl_add_hw_filters(struct ixl_vsi *, int, int); static void ixl_del_hw_filters(struct ixl_vsi *, int); static struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *, u8 *, s16); static void ixl_add_mc_filter(struct ixl_vsi *, u8 *); static void ixl_free_mac_filters(struct ixl_vsi *vsi); /* Sysctls*/ static void ixl_add_device_sysctls(struct ixl_pf *); static int ixl_debug_info(SYSCTL_HANDLER_ARGS); static void ixl_print_debug_info(struct ixl_pf *); static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS); static int ixl_set_advertise(SYSCTL_HANDLER_ARGS); static int ixl_current_speed(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); #ifdef IXL_DEBUG_SYSCTL static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); #endif /* The MSI/X Interrupt handlers */ static void ixl_intr(void *); static void ixl_msix_que(void *); static void ixl_msix_adminq(void *); static void ixl_handle_mdd_event(struct ixl_pf *); /* Deferred interrupt tasklets */ static void ixl_do_adminq(void *, int); /* Statistics */ static void ixl_add_hw_stats(struct ixl_pf *); static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_hw_port_stats *); static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_eth_stats *); static void ixl_update_stats_counters(struct ixl_pf *); static void ixl_update_eth_stats(struct ixl_vsi *); static void ixl_update_vsi_stats(struct ixl_vsi *); static void ixl_pf_reset_stats(struct ixl_pf *); static void ixl_vsi_reset_stats(struct ixl_vsi *); static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool, u64 *, u64 *); static void ixl_stat_update32(struct i40e_hw *, u32, bool, u64 *, u64 *); /* NVM update */ static int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *); #ifdef PCI_IOV static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); static int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*); static void ixl_iov_uninit(device_t dev); static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*); static void ixl_handle_vf_msg(struct ixl_pf *, struct i40e_arq_event_info *); static void 
ixl_handle_vflr(void *arg, int pending); static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); #endif /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixl_probe), DEVMETHOD(device_attach, ixl_attach), DEVMETHOD(device_detach, ixl_detach), DEVMETHOD(device_shutdown, ixl_shutdown), #ifdef PCI_IOV DEVMETHOD(pci_iov_init, ixl_iov_init), DEVMETHOD(pci_iov_uninit, ixl_iov_uninit), DEVMETHOD(pci_iov_add_vf, ixl_add_vf), #endif {0, 0} }; static driver_t ixl_driver = { "ixl", ixl_methods, sizeof(struct ixl_pf), }; devclass_t ixl_devclass; DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); MODULE_DEPEND(ixl, pci, 1, 1, 1); MODULE_DEPEND(ixl, ether, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(ixl, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ /* ** Global reset mutex */ static struct mtx ixl_reset_mtx; /* ** TUNEABLE PARAMETERS: */ static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, "IXL driver parameters"); /* * MSIX should be the default for best performance, * but this allows it to be forced off for testing. */ static int ixl_enable_msix = 1; TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0, "Enable MSI-X interrupts"); /* ** Number of descriptors per ring: ** - TX and RX are the same size */ static int ixl_ringsz = DEFAULT_RING; TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz); SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN, &ixl_ringsz, 0, "Descriptor Ring Size"); /* ** This can be set manually, if left as 0 the ** number of queues will be calculated based ** on cpus and msix vectors available. 
*/ int ixl_max_queues = 0; TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues); SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN, &ixl_max_queues, 0, "Number of Queues"); /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ int ixl_dynamic_rx_itr = 0; TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); int ixl_dynamic_tx_itr = 0; TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); int ixl_rx_itr = IXL_ITR_8K; TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, &ixl_rx_itr, 0, "RX Interrupt Rate"); int ixl_tx_itr = IXL_ITR_4K; TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, &ixl_tx_itr, 0, "TX Interrupt Rate"); #ifdef IXL_FDIR static int ixl_enable_fdir = 1; TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir); /* Rate at which we sample */ int ixl_atr_rate = 20; TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate); #endif #ifdef DEV_NETMAP #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */ #include #endif /* DEV_NETMAP */ static char *ixl_fc_string[6] = { "None", "Rx", "Tx", "Full", "Priority", "Default" }; static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /********************************************************************* * Device identification routine * * ixl_probe determines if the driver should be loaded on * the hardware based on PCI vendor/device id of the device. * * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int ixl_probe(device_t dev) { ixl_vendor_info_t *ent; u16 pci_vendor_id, pci_device_id; u16 pci_subvendor_id, pci_subdevice_id; char device_name[256]; static bool lock_init = FALSE; INIT_DEBUGOUT("ixl_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != I40E_INTEL_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = ixl_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == 0)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == 0))) { sprintf(device_name, "%s, Version - %s", ixl_strings[ent->index], ixl_driver_version); device_set_desc_copy(dev, device_name); /* One shot mutex init */ if (lock_init == FALSE) { lock_init = TRUE; mtx_init(&ixl_reset_mtx, "ixl_reset", "IXL RESET Lock", MTX_DEF); } return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. 
* * return 0 on success, positive on failure *********************************************************************/ static int ixl_attach(device_t dev) { struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; u16 bus; int error = 0; #ifdef PCI_IOV nvlist_t *pf_schema, *vf_schema; int iov_error; #endif INIT_DEBUGOUT("ixl_attach: begin"); /* Allocate, clear, and link in our primary soft structure */ pf = device_get_softc(dev); pf->dev = pf->osdep.dev = dev; hw = &pf->hw; /* ** Note this assumes we have a single embedded VSI, ** this could be enhanced later to allocate multiple */ vsi = &pf->vsi; vsi->dev = pf->dev; /* Core Lock Init*/ IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev)); /* Set up the timer callout */ callout_init_mtx(&pf->timer, &pf->pf_mtx, 0); /* Save off the PCI information */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); pf->vc_debug_lvl = 1; /* Do PCI setup - map BAR0, etc */ if (ixl_allocate_pci_resources(pf)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_out; } /* Establish a clean starting point */ i40e_clear_hw(hw); error = i40e_pf_reset(hw); if (error) { device_printf(dev, "PF reset failure %d\n", error); error = EIO; goto err_out; } /* Set admin queue parameters */ hw->aq.num_arq_entries = IXL_AQ_LEN; hw->aq.num_asq_entries = IXL_AQ_LEN; hw->aq.arq_buf_size = IXL_AQ_BUFSZ; hw->aq.asq_buf_size = IXL_AQ_BUFSZ; /* Initialize mac filter list for VSI */ SLIST_INIT(&vsi->ftl); /* Initialize the shared code */ error = i40e_init_shared_code(hw); if (error) { device_printf(dev, "Unable to initialize shared code, error %d\n", error); error = EIO; goto err_out; } /* Set up the admin queue */ error = i40e_init_adminq(hw); if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "Unable to initialize Admin Queue, error %d\n", error); error = EIO; goto err_out; } device_printf(dev, "%s\n", ixl_fw_version_str(hw)); if (error == I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "The driver for the device stopped " "because the NVM image is newer than expected.\n" "You must install the most recent version of " "the network driver.\n"); error = EIO; goto err_out; } if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) device_printf(dev, "The driver for the device detected " "a newer version of the NVM image than expected.\n" "Please install the most recent version of the network driver.\n"); else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) device_printf(dev, "The driver for the device detected " "an older version of the NVM image than expected.\n" "Please update the NVM image.\n"); /* Clear PXE mode */ i40e_clear_pxe_mode(hw); /* Get capabilities from the device */ error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "HW capabilities failure!\n"); goto err_get_cap; } /* Set up host memory cache */ error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (error) { device_printf(dev, "init_lan_hmc failed: %d\n", error); goto err_get_cap; } error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (error) { device_printf(dev, "configure_lan_hmc failed: %d\n", error); 
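/*
 * The error labels at the end of this function unwind in reverse order
 * of setup: err_mac_hmc shuts down the LAN HMC, falls into err_get_cap
 * which shuts down the admin queue, and then err_out which releases the
 * PCI resources and the VSI.  Jumping to err_mac_hmc here therefore
 * tears down everything initialized so far.
 */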
goto err_mac_hmc; } /* Disable LLDP from the firmware */ i40e_aq_stop_lldp(hw, TRUE, NULL); i40e_get_mac_addr(hw, hw->mac.addr); error = i40e_validate_mac_addr(hw->mac.addr); if (error) { device_printf(dev, "validate_mac_addr failed: %d\n", error); goto err_mac_hmc; } bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); i40e_get_port_mac_addr(hw, hw->mac.port_addr); /* Set up VSI and queues */ if (ixl_setup_stations(pf) != 0) { device_printf(dev, "setup stations failed!\n"); error = ENOMEM; goto err_mac_hmc; } if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) { i40e_msec_delay(75); error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); if (error) { device_printf(dev, "link restart failed, aq_err=%d\n", pf->hw.aq.asq_last_status); goto err_late; } } /* Determine link state */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); /* Setup OS network interface / ifnet */ if (ixl_setup_interface(dev, vsi) != 0) { device_printf(dev, "interface setup failed!\n"); error = EIO; goto err_late; } error = ixl_switch_config(pf); if (error) { device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error); goto err_late; } /* Limit PHY interrupts to link, autoneg, and modules failure */ error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (error) { device_printf(dev, "i40e_aq_set_phy_mask() failed: err %d," " aq_err %d\n", error, hw->aq.asq_last_status); goto err_late; } /* Get the bus configuration and set the shared code */ bus = ixl_get_bus_info(hw, dev); i40e_set_pci_config_data(hw, bus); /* Initialize taskqueues */ ixl_init_taskqueues(pf); /* Initialize statistics & add sysctls */ ixl_add_device_sysctls(pf); ixl_pf_reset_stats(pf); ixl_update_stats_counters(pf); ixl_add_hw_stats(pf); /* Register for VLAN events */ vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); #ifdef PCI_IOV /* SR-IOV is only supported when MSI-X is in use. */ if (pf->msix > 1) { pf_schema = pci_iov_schema_alloc_node(); vf_schema = pci_iov_schema_alloc_node(); pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", IOV_SCHEMA_HASDEFAULT, TRUE); pci_iov_schema_add_bool(vf_schema, "allow-set-mac", IOV_SCHEMA_HASDEFAULT, FALSE); pci_iov_schema_add_bool(vf_schema, "allow-promisc", IOV_SCHEMA_HASDEFAULT, FALSE); iov_error = pci_iov_attach(dev, pf_schema, vf_schema); if (iov_error != 0) device_printf(dev, "Failed to initialize SR-IOV (error=%d)\n", iov_error); } #endif #ifdef DEV_NETMAP ixl_netmap_attach(vsi); #endif /* DEV_NETMAP */ INIT_DEBUGOUT("ixl_attach: end"); return (0); err_late: if (vsi->ifp != NULL) if_free(vsi->ifp); err_mac_hmc: i40e_shutdown_lan_hmc(hw); err_get_cap: i40e_shutdown_adminq(hw); err_out: ixl_free_pci_resources(pf); ixl_free_vsi(vsi); IXL_PF_LOCK_DESTROY(pf); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
* * return 0 on success, positive on failure *********************************************************************/ static int ixl_detach(device_t dev) { struct ixl_pf *pf = device_get_softc(dev); struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; i40e_status status; #ifdef PCI_IOV int error; #endif INIT_DEBUGOUT("ixl_detach: begin"); /* Make sure VLANS are not using driver */ if (vsi->ifp->if_vlantrunk != NULL) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } #ifdef PCI_IOV error = pci_iov_detach(dev); if (error != 0) { device_printf(dev, "SR-IOV in use; detach first.\n"); return (error); } #endif ether_ifdetach(vsi->ifp); if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) ixl_stop(pf); ixl_free_taskqueues(pf); /* Shutdown LAN HMC */ status = i40e_shutdown_lan_hmc(hw); if (status) device_printf(dev, "Shutdown LAN HMC failed with code %d\n", status); /* Shutdown admin queue */ status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, "Shutdown Admin queue failed with code %d\n", status); /* Unregister VLAN events */ if (vsi->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); if (vsi->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); callout_drain(&pf->timer); #ifdef DEV_NETMAP netmap_detach(vsi->ifp); #endif /* DEV_NETMAP */ ixl_free_pci_resources(pf); bus_generic_detach(dev); if_free(vsi->ifp); ixl_free_vsi(vsi); IXL_PF_LOCK_DESTROY(pf); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int ixl_shutdown(device_t dev) { struct ixl_pf *pf = device_get_softc(dev); ixl_stop(pf); return (0); } /********************************************************************* * * Get the hardware capabilities * **********************************************************************/ static int ixl_get_hw_capabilities(struct ixl_pf *pf) { struct i40e_aqc_list_capabilities_element_resp *buf; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int error, len; u16 needed; bool again = TRUE; len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); retry: if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate cap memory\n"); return (ENOMEM); } /* This populates the hw struct */ error = i40e_aq_discover_capabilities(hw, buf, len, &needed, i40e_aqc_opc_list_func_capabilities, NULL); free(buf, M_DEVBUF); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && (again == TRUE)) { /* retry once with a larger buffer */ again = FALSE; len = needed; goto retry; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { device_printf(dev, "capability discovery failed: %d\n", pf->hw.aq.asq_last_status); return (ENODEV); } /* Capture this PF's starting queue pair */ pf->qbase = hw->func_caps.base_queue; #ifdef IXL_DEBUG device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, " "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n", hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, hw->func_caps.num_msix_vectors_vf, hw->func_caps.fd_filters_guaranteed, hw->func_caps.fd_filters_best_effort, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, hw->func_caps.base_queue); #endif return (error); } static void ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) { device_t dev = vsi->dev; /* Enable/disable TXCSUM/TSO4 */ if (!(ifp->if_capenable & IFCAP_TXCSUM) && 
!(ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { ifp->if_capenable |= IFCAP_TXCSUM; /* enable TXCSUM, restore TSO if previously enabled */ if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; ifp->if_capenable |= IFCAP_TSO4; } } else if (mask & IFCAP_TSO4) { ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; device_printf(dev, "TSO4 requires txcsum, enabling both...\n"); } } else if((ifp->if_capenable & IFCAP_TXCSUM) && !(ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) ifp->if_capenable &= ~IFCAP_TXCSUM; else if (mask & IFCAP_TSO4) ifp->if_capenable |= IFCAP_TSO4; } else if((ifp->if_capenable & IFCAP_TXCSUM) && (ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { vsi->flags |= IXL_FLAGS_KEEP_TSO4; ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); device_printf(dev, "TSO4 requires txcsum, disabling both...\n"); } else if (mask & IFCAP_TSO4) ifp->if_capenable &= ~IFCAP_TSO4; } /* Enable/disable TXCSUM_IPV6/TSO6 */ if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; ifp->if_capenable |= IFCAP_TSO6; } } else if (mask & IFCAP_TSO6) { ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; device_printf(dev, "TSO6 requires txcsum6, enabling both...\n"); } } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; else if (mask & IFCAP_TSO6) ifp->if_capenable |= IFCAP_TSO6; } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && (ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { vsi->flags |= IXL_FLAGS_KEEP_TSO6; ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); device_printf(dev, "TSO6 requires txcsum6, disabling both...\n"); } else if (mask & IFCAP_TSO6) ifp->if_capenable &= ~IFCAP_TSO6; } } /********************************************************************* * Ioctl entry point * * ixl_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ static int ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct ixl_vsi *vsi = ifp->if_softc; struct ixl_pf *pf = vsi->back; struct ifreq *ifr = (struct ifreq *)data; struct ifdrv *ifd = (struct ifdrv *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; bool avoid_reset = FALSE; #endif int error = 0; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif #if defined(INET) || defined(INET6) /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. 
*/ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ixl_init(pf); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; #endif case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); if (ifr->ifr_mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { error = EINVAL; } else { IXL_PF_LOCK(pf); ifp->if_mtu = ifr->ifr_mtu; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; ixl_init_locked(pf); IXL_PF_UNLOCK(pf); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); IXL_PF_LOCK(pf); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ pf->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { ixl_set_promisc(vsi); } } else { IXL_PF_UNLOCK(pf); ixl_init(pf); IXL_PF_LOCK(pf); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_UNLOCK(pf); ixl_stop(pf); IXL_PF_LOCK(pf); } } pf->if_flags = ifp->if_flags; IXL_PF_UNLOCK(pf); break; case SIOCSDRVSPEC: case SIOCGDRVSPEC: IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific " "Info)\n"); /* NVM update command */ if (ifd->ifd_cmd == I40E_NVM_ACCESS) error = ixl_handle_nvmupd_cmd(pf, ifd); else error = EINVAL; break; case SIOCADDMULTI: IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_disable_intr(vsi); ixl_add_multi(vsi); ixl_enable_intr(vsi); IXL_PF_UNLOCK(pf); } break; case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_disable_intr(vsi); ixl_del_multi(vsi); ixl_enable_intr(vsi); IXL_PF_UNLOCK(pf); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: #ifdef IFM_ETH_XTYPE case SIOCGIFXMEDIA: #endif IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &vsi->media, command); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); ixl_cap_txcsum_tso(vsi, ifp, mask); if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_init_locked(pf); IXL_PF_UNLOCK(pf); } VLAN_CAPABILITIES(ifp); break; } default: IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command); error = ether_ioctl(ifp, command, data); break; } return (error); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. 
* * return 0 on success, positive on failure **********************************************************************/ static void ixl_init_locked(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; struct i40e_filter_control_settings filter; u8 tmpaddr[ETHER_ADDR_LEN]; int ret; mtx_assert(&pf->pf_mtx, MA_OWNED); INIT_DEBUGOUT("ixl_init: begin"); ixl_stop_locked(pf); /* Get the latest mac address... User might use a LAA */ bcopy(IF_LLADDR(vsi->ifp), tmpaddr, I40E_ETH_LENGTH_OF_ADDRESS); if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); bcopy(tmpaddr, hw->mac.addr, I40E_ETH_LENGTH_OF_ADDRESS); ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY, hw->mac.addr, NULL); if (ret) { device_printf(dev, "LLA address" "change failed!!\n"); return; } else { - ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); } } + ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); + /* Set the various hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); /* Set up the device filtering */ bzero(&filter, sizeof(filter)); filter.enable_ethtype = TRUE; filter.enable_macvlan = TRUE; #ifdef IXL_FDIR filter.enable_fdir = TRUE; #endif filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; if (i40e_set_filter_control(hw, &filter)) device_printf(dev, "i40e_set_filter_control() failed\n"); /* Set up RSS */ ixl_config_rss(vsi); /* Prepare the VSI: rings, hmc contexts, etc... */ if (ixl_initialize_vsi(vsi)) { device_printf(dev, "initialize vsi failed!!\n"); return; } /* Add protocol filters to list */ ixl_init_filters(vsi); /* Setup vlan's if needed */ ixl_setup_vlan_filters(vsi); /* Set up MSI/X routing and the ITR settings */ if (ixl_enable_msix) { ixl_configure_msix(pf); ixl_configure_itr(pf); } else ixl_configure_legacy(pf); ixl_enable_rings(vsi); i40e_aq_set_default_vsi(hw, vsi->seid, NULL); ixl_reconfigure_filters(vsi); - /* Set MTU in hardware*/ - int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size, - TRUE, 0, NULL); - if (aq_error) - device_printf(vsi->dev, - "aq_set_mac_config in init error, code %d\n", - aq_error); - /* And now turn on interrupts */ ixl_enable_intr(vsi); /* Get link info */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); ixl_update_link_status(pf); /* Start the local timer */ callout_reset(&pf->timer, hz, ixl_local_timer, pf); /* Now inform the stack we're ready */ ifp->if_drv_flags |= IFF_DRV_RUNNING; return; } -// XXX: super experimental stuff static int ixl_teardown_hw_structs(struct ixl_pf *pf) { enum i40e_status_code status = 0; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; /* Shutdown LAN HMC */ if (hw->hmc.hmc_obj) { status = i40e_shutdown_lan_hmc(hw); if (status) { device_printf(dev, "init: LAN HMC shutdown failure; status %d\n", status); goto err_out; } } // XXX: This gets called when we know the adminq is inactive; // so we already know it's setup when we get here. 
/* Shutdown admin queue */ status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, "init: Admin Queue shutdown failure; status %d\n", status); err_out: return (status); } static int ixl_reset(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int error = 0; // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary i40e_clear_hw(hw); error = i40e_pf_reset(hw); if (error) { device_printf(dev, "init: PF reset failure"); error = EIO; goto err_out; } error = i40e_init_adminq(hw); if (error) { device_printf(dev, "init: Admin queue init failure; status code %d", error); error = EIO; goto err_out; } i40e_clear_pxe_mode(hw); error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "init: Error retrieving HW capabilities; status code %d\n", error); goto err_out; } error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (error) { device_printf(dev, "init: LAN HMC init failed; status code %d\n", error); error = EIO; goto err_out; } error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (error) { device_printf(dev, "init: LAN HMC config failed; status code %d\n", error); error = EIO; goto err_out; } // XXX: need to do switch config here? error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (error) { device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d," " aq_err %d\n", error, hw->aq.asq_last_status); error = EIO; goto err_out; } u8 set_fc_err_mask; error = i40e_set_fc(hw, &set_fc_err_mask, true); if (error) { device_printf(dev, "init: setting link flow control failed; retcode %d," " fc_err_mask 0x%02x\n", error, set_fc_err_mask); goto err_out; } // XXX: (Rebuild VSIs?) // Firmware delay workaround if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) { i40e_msec_delay(75); error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); if (error) { device_printf(dev, "init: link restart failed, aq_err %d\n", hw->aq.asq_last_status); goto err_out; } } // [add_filter_to_drop_tx_flow_control_frames] // - TODO: Implement // i40e_send_version // - TODO: Properly implement struct i40e_driver_version dv; dv.major_version = 1; dv.minor_version = 1; dv.build_version = 1; dv.subbuild_version = 0; // put in a driver version string that is less than 0x80 bytes long bzero(&dv.driver_string, sizeof(dv.driver_string)); i40e_aq_send_driver_version(hw, &dv, NULL); err_out: return (error); } static void ixl_init(void *arg) { struct ixl_pf *pf = arg; int ret = 0; /* * If the aq is dead here, it probably means something outside of the driver * did something to the adapter, like a PF reset. * So rebuild the driver's state here if that occurs. 
*/ if (!i40e_check_asq_alive(&pf->hw)) { device_printf(pf->dev, "asq is not alive; rebuilding...\n"); IXL_PF_LOCK(pf); ixl_teardown_hw_structs(pf); ixl_reset(pf); IXL_PF_UNLOCK(pf); } /* Set up interrupt routing here */ if (pf->msix > 1) ret = ixl_assign_vsi_msix(pf); else ret = ixl_assign_vsi_legacy(pf); if (ret) { device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", ret); return; } IXL_PF_LOCK(pf); ixl_init_locked(pf); IXL_PF_UNLOCK(pf); return; } /* ** ** MSIX Interrupt Handlers and Tasklets ** */ static void ixl_handle_que(void *context, int pending) { struct ixl_queue *que = context; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ifnet *ifp = vsi->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { more = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); ixl_txeof(que); if (!drbr_empty(ifp, txr->br)) ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); if (more) { taskqueue_enqueue(que->tq, &que->task); return; } } /* Reenable this interrupt - hmmm */ ixl_enable_queue(hw, que->me); return; } /********************************************************************* * * Legacy Interrupt Service routine * **********************************************************************/ void ixl_intr(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; struct ifnet *ifp = vsi->ifp; struct tx_ring *txr = &que->txr; u32 reg, icr0, mask; bool more_tx, more_rx; ++que->irqs; /* Protect against spurious interrupts */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; icr0 = rd32(hw, I40E_PFINT_ICR0); reg = rd32(hw, I40E_PFINT_DYN_CTL0); reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; wr32(hw, I40E_PFINT_DYN_CTL0, reg); mask = rd32(hw, I40E_PFINT_ICR0_ENA); #ifdef PCI_IOV if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) taskqueue_enqueue(pf->tq, &pf->vflr_task); #endif if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { taskqueue_enqueue(pf->tq, &pf->adminq); return; } more_rx = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); more_tx = ixl_txeof(que); if (!drbr_empty(vsi->ifp, txr->br)) more_tx = 1; IXL_TX_UNLOCK(txr); /* re-enable other interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, mask); /* And now the queues */ reg = rd32(hw, I40E_QINT_RQCTL(0)); reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_RQCTL(0), reg); reg = rd32(hw, I40E_QINT_TQCTL(0)); reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK; wr32(hw, I40E_QINT_TQCTL(0), reg); ixl_enable_legacy(hw); return; } /********************************************************************* * * MSIX VSI Interrupt Service routine * **********************************************************************/ void ixl_msix_que(void *arg) { struct ixl_queue *que = arg; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; bool more_tx, more_rx; /* Protect against spurious interrupts */ if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; ++que->irqs; more_rx = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); more_tx = ixl_txeof(que); /* ** Make certain that if the stack ** has anything queued the task gets ** scheduled to handle it. 
*/ if (!drbr_empty(vsi->ifp, txr->br)) more_tx = 1; IXL_TX_UNLOCK(txr); ixl_set_queue_rx_itr(que); ixl_set_queue_tx_itr(que); if (more_tx || more_rx) taskqueue_enqueue(que->tq, &que->task); else ixl_enable_queue(hw, que->me); return; } /********************************************************************* * * MSIX Admin Queue Interrupt Service routine * **********************************************************************/ static void ixl_msix_adminq(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; u32 reg, mask, rstat_reg; bool do_task = FALSE; ++pf->admin_irq; reg = rd32(hw, I40E_PFINT_ICR0); mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* Check on the cause */ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) { mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK; do_task = TRUE; } if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) { ixl_handle_mdd_event(pf); mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK; } if (reg & I40E_PFINT_ICR0_GRST_MASK) { device_printf(pf->dev, "Reset Requested!\n"); rstat_reg = rd32(hw, I40E_GLGEN_RSTAT); rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; device_printf(pf->dev, "Reset type: "); switch (rstat_reg) { /* These others might be handled similarly to an EMPR reset */ case I40E_RESET_CORER: printf("CORER\n"); break; case I40E_RESET_GLOBR: printf("GLOBR\n"); break; case I40E_RESET_EMPR: printf("EMPR\n"); atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); break; default: printf("?\n"); break; } // overload admin queue task to check reset progress? do_task = TRUE; } if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) { device_printf(pf->dev, "ECC Error detected!\n"); } if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) { device_printf(pf->dev, "HMC Error detected!\n"); } if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) { device_printf(pf->dev, "PCI Exception detected!\n"); } #ifdef PCI_IOV if (reg & I40E_PFINT_ICR0_VFLR_MASK) { mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; taskqueue_enqueue(pf->tq, &pf->vflr_task); } #endif reg = rd32(hw, I40E_PFINT_DYN_CTL0); reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; wr32(hw, I40E_PFINT_DYN_CTL0, reg); if (do_task) taskqueue_enqueue(pf->tq, &pf->adminq); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. 
* **********************************************************************/ static void ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct ixl_vsi *vsi = ifp->if_softc; struct ixl_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; INIT_DEBUGOUT("ixl_media_status: begin"); IXL_PF_LOCK(pf); hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); ixl_update_link_status(pf); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!pf->link_up) { IXL_PF_UNLOCK(pf); return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware always does full-duplex */ ifmr->ifm_active |= IFM_FDX; switch (hw->phy.link_info.phy_type) { /* 100 M */ case I40E_PHY_TYPE_100BASE_TX: ifmr->ifm_active |= IFM_100_TX; break; /* 1 G */ case I40E_PHY_TYPE_1000BASE_T: ifmr->ifm_active |= IFM_1000_T; break; case I40E_PHY_TYPE_1000BASE_SX: ifmr->ifm_active |= IFM_1000_SX; break; case I40E_PHY_TYPE_1000BASE_LX: ifmr->ifm_active |= IFM_1000_LX; break; /* 10 G */ case I40E_PHY_TYPE_10GBASE_SFPP_CU: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_SR: ifmr->ifm_active |= IFM_10G_SR; break; case I40E_PHY_TYPE_10GBASE_LR: ifmr->ifm_active |= IFM_10G_LR; break; case I40E_PHY_TYPE_10GBASE_T: ifmr->ifm_active |= IFM_10G_T; break; /* 40 G */ case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: ifmr->ifm_active |= IFM_40G_CR4; break; case I40E_PHY_TYPE_40GBASE_SR4: ifmr->ifm_active |= IFM_40G_SR4; break; case I40E_PHY_TYPE_40GBASE_LR4: ifmr->ifm_active |= IFM_40G_LR4; break; #ifndef IFM_ETH_XTYPE case I40E_PHY_TYPE_1000BASE_KX: ifmr->ifm_active |= IFM_1000_CX; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_KX4: ifmr->ifm_active |= IFM_10G_CX4; break; case I40E_PHY_TYPE_10GBASE_KR: ifmr->ifm_active |= IFM_10G_SR; break; case I40E_PHY_TYPE_40GBASE_KR4: case I40E_PHY_TYPE_XLPPI: ifmr->ifm_active |= IFM_40G_SR4; break; #else case I40E_PHY_TYPE_1000BASE_KX: ifmr->ifm_active |= IFM_1000_KX; break; /* ERJ: What's the difference between these? */ case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ifmr->ifm_active |= IFM_10G_CR1; break; case I40E_PHY_TYPE_10GBASE_KX4: ifmr->ifm_active |= IFM_10G_KX4; break; case I40E_PHY_TYPE_10GBASE_KR: ifmr->ifm_active |= IFM_10G_KR; break; /* Our single 20G media type */ case I40E_PHY_TYPE_20GBASE_KR2: ifmr->ifm_active |= IFM_20G_KR2; break; case I40E_PHY_TYPE_40GBASE_KR4: ifmr->ifm_active |= IFM_40G_KR4; break; case I40E_PHY_TYPE_XLPPI: ifmr->ifm_active |= IFM_40G_XLPPI; break; #endif default: ifmr->ifm_active |= IFM_UNKNOWN; break; } /* Report flow control status as well */ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; IXL_PF_UNLOCK(pf); return; } /* * NOTE: Fortville does not support forcing media speeds. Instead, * use the set_advertise sysctl to set the speeds Fortville * will advertise or be allowed to operate at. */ static int ixl_media_change(struct ifnet * ifp) { struct ixl_vsi *vsi = ifp->if_softc; struct ifmedia *ifm = &vsi->media; INIT_DEBUGOUT("ixl_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(ifp, "Media change is not supported.\n"); return (ENODEV); } #ifdef IXL_FDIR /* ** ATR: Application Targetted Receive - creates a filter ** based on TX flow info that will keep the receive ** portion of the flow on the same queue. 
Based on the ** implementation this is only available for TCP connections */ void ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype) { struct ixl_vsi *vsi = que->vsi; struct tx_ring *txr = &que->txr; struct i40e_filter_program_desc *FDIR; u32 ptype, dtype; int idx; /* check if ATR is enabled and sample rate */ if ((!ixl_enable_fdir) || (!txr->atr_rate)) return; /* ** We sample all TCP SYN/FIN packets, ** or at the selected sample rate */ txr->atr_count++; if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) && (txr->atr_count < txr->atr_rate)) return; txr->atr_count = 0; /* Get a descriptor to use */ idx = txr->next_avail; FDIR = (struct i40e_filter_program_desc *) &txr->base[idx]; if (++idx == que->num_desc) idx = 0; txr->avail--; txr->next_avail = idx; ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & I40E_TXD_FLTR_QW0_QINDEX_MASK; ptype |= (etype == ETHERTYPE_IP) ? (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; dtype = I40E_TX_DESC_DTYPE_FILTER_PROG; /* ** We use the TCP TH_FIN as a trigger to remove ** the filter, otherwise its an update. */ dtype |= (th->th_flags & TH_FIN) ? (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << I40E_TXD_FLTR_QW1_PCMD_SHIFT) : (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << I40E_TXD_FLTR_QW1_PCMD_SHIFT); dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << I40E_TXD_FLTR_QW1_DEST_SHIFT; dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; FDIR->qindex_flex_ptype_vsi = htole32(ptype); FDIR->dtype_cmd_cntindex = htole32(dtype); return; } #endif static void ixl_set_promisc(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int err, mcnt = 0; bool uni = FALSE, multi = FALSE; if (ifp->if_flags & IFF_ALLMULTI) multi = TRUE; else { /* Need to count the multicast addresses */ struct ifmultiaddr *ifma; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_MULTICAST_ADDR) break; mcnt++; } if_maddr_runlock(ifp); } if (mcnt >= MAX_MULTICAST_ADDR) multi = TRUE; if (ifp->if_flags & IFF_PROMISC) uni = TRUE; err = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, uni, NULL); err = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, multi, NULL); return; } /********************************************************************* * Filter Routines * * Routines for multicast and vlan filter management. * *********************************************************************/ static void ixl_add_multi(struct ixl_vsi *vsi) { struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int mcnt = 0, flags; IOCTL_DEBUGOUT("ixl_add_multi: begin"); if_maddr_rlock(ifp); /* ** First just get a count, to decide if we ** we simply use multicast promiscuous. 
*/ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcnt++; } if_maddr_runlock(ifp); if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete existing MC filters */ ixl_del_hw_filters(vsi, mcnt); i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); return; } mcnt = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; ixl_add_mc_filter(vsi, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); mcnt++; } if_maddr_runlock(ifp); if (mcnt > 0) { flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); ixl_add_hw_filters(vsi, flags, mcnt); } IOCTL_DEBUGOUT("ixl_add_multi: end"); return; } static void ixl_del_multi(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct ifmultiaddr *ifma; struct ixl_mac_filter *f; int mcnt = 0; bool match = FALSE; IOCTL_DEBUGOUT("ixl_del_multi: begin"); /* Search for removed multicast addresses */ if_maddr_rlock(ifp); SLIST_FOREACH(f, &vsi->ftl, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { match = FALSE; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); if (cmp_etheraddr(f->macaddr, mc_addr)) { match = TRUE; break; } } if (match == FALSE) { f->flags |= IXL_FILTER_DEL; mcnt++; } } } if_maddr_runlock(ifp); if (mcnt > 0) ixl_del_hw_filters(vsi, mcnt); } /********************************************************************* * Timer routine * * This routine checks for link status,updates statistics, * and runs the watchdog check. * + * Only runs when the driver is configured UP and RUNNING. + * **********************************************************************/ static void ixl_local_timer(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; device_t dev = pf->dev; int hung = 0; u32 mask; mtx_assert(&pf->pf_mtx, MA_OWNED); /* Fire off the adminq task */ taskqueue_enqueue(pf->tq, &pf->adminq); /* Update stats */ ixl_update_stats_counters(pf); /* ** Check status of the queues */ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); for (int i = 0; i < vsi->num_queues; i++,que++) { /* Any queues with outstanding work get a sw irq */ if (que->busy) wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask); /* ** Each time txeof runs without cleaning, but there ** are uncleaned descriptors it increments busy. If ** we get to 5 we declare it hung. */ if (que->busy == IXL_QUEUE_HUNG) { ++hung; /* Mark the queue as inactive */ vsi->active_queues &= ~((u64)1 << que->me); continue; } else { /* Check if we've come back from hung */ if ((vsi->active_queues & ((u64)1 << que->me)) == 0) vsi->active_queues |= ((u64)1 << que->me); } if (que->busy >= IXL_MAX_TX_BUSY) { #ifdef IXL_DEBUG device_printf(dev,"Warning queue %d " "appears to be hung!\n", i); #endif que->busy = IXL_QUEUE_HUNG; ++hung; } } /* Only reinit if all queues show hung */ if (hung == vsi->num_queues) goto hung; callout_reset(&pf->timer, hz, ixl_local_timer, pf); return; hung: device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n"); ixl_init_locked(pf); } /* ** Note: this routine updates the OS on the link state ** the real check of the hardware only happens with ** a link interrupt. 
*/ static void ixl_update_link_status(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; if (pf->link_up) { if (vsi->link_active == FALSE) { pf->fc = hw->fc.current_mode; if (bootverbose) { device_printf(dev,"Link is up %d Gbps %s," " Flow Control: %s\n", ((pf->link_speed == I40E_LINK_SPEED_40GB)? 40:10), "Full Duplex", ixl_fc_string[pf->fc]); } vsi->link_active = TRUE; /* ** Warn user if link speed on NPAR enabled ** partition is not at least 10GB */ if (hw->func_caps.npar_enable && (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB || hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) device_printf(dev, "The partition detected" "link speed that is less than 10Gbps\n"); if_link_state_change(ifp, LINK_STATE_UP); } } else { /* Link down */ if (vsi->link_active == TRUE) { if (bootverbose) device_printf(dev, "Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); vsi->link_active = FALSE; } } return; } static void ixl_stop(struct ixl_pf *pf) { IXL_PF_LOCK(pf); ixl_stop_locked(pf); IXL_PF_UNLOCK(pf); ixl_free_interrupt_resources(pf); } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * **********************************************************************/ static void ixl_stop_locked(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; INIT_DEBUGOUT("ixl_stop: begin\n"); IXL_PF_LOCK_ASSERT(pf); /* Stop the local timer */ callout_stop(&pf->timer); if (pf->num_vfs == 0) ixl_disable_intr(vsi); else ixl_disable_rings_intr(vsi); ixl_disable_rings(vsi); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); return; } /********************************************************************* * * Setup MSIX Interrupt resources and handlers for the VSI * **********************************************************************/ static int ixl_assign_vsi_legacy(struct ixl_pf *pf) { device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; int error, rid = 0; if (pf->msix == 1) rid = 1; pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (pf->res == NULL) { device_printf(dev, "Unable to allocate" " bus resource: vsi legacy/msi interrupt\n"); return (ENXIO); } /* Set the handler function */ error = bus_setup_intr(dev, pf->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_intr, pf, &pf->tag); if (error) { pf->res = NULL; device_printf(dev, "Failed to register legacy/msi handler"); return (error); } bus_describe_intr(dev, pf->res, pf->tag, "irq0"); TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); TASK_INIT(&que->task, 0, ixl_handle_que, que); que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", device_get_nameunit(dev)); TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); #ifdef PCI_IOV TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); #endif pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, taskqueue_thread_enqueue, &pf->tq); taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq", device_get_nameunit(dev)); return (0); } static void ixl_init_taskqueues(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; device_t dev = pf->dev; /* Tasklet for Admin Queue */ TASK_INIT(&pf->adminq, 0, 
ixl_do_adminq, pf); #ifdef PCI_IOV /* VFLR Tasklet */ TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); #endif /* Create and start PF taskqueue */ pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, taskqueue_thread_enqueue, &pf->tq); taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq", device_get_nameunit(dev)); /* Create queue tasks and start queue taskqueues */ for (int i = 0; i < vsi->num_queues; i++, que++) { TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); TASK_INIT(&que->task, 0, ixl_handle_que, que); que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); #ifdef RSS CPU_SETOF(cpu_id, &cpu_mask); taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, &cpu_mask, "%s (bucket %d)", device_get_nameunit(dev), cpu_id); #else taskqueue_start_threads(&que->tq, 1, PI_NET, "%s (que %d)", device_get_nameunit(dev), que->me); #endif } } static void ixl_free_taskqueues(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; if (pf->tq) taskqueue_free(pf->tq); for (int i = 0; i < vsi->num_queues; i++, que++) { if (que->tq) taskqueue_free(que->tq); } } /********************************************************************* * * Setup MSIX Interrupt resources and handlers for the VSI * **********************************************************************/ static int ixl_assign_vsi_msix(struct ixl_pf *pf) { device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; struct tx_ring *txr; int error, rid, vector = 0; #ifdef RSS cpuset_t cpu_mask; #endif /* Admin Queue interrupt vector is 0 */ rid = vector + 1; pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!pf->res) { device_printf(dev, "Unable to allocate" " bus resource: Adminq interrupt [rid=%d]\n", rid); return (ENXIO); } /* Set the adminq vector and handler */ error = bus_setup_intr(dev, pf->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_msix_adminq, pf, &pf->tag); if (error) { pf->res = NULL; device_printf(dev, "Failed to register Admin que handler"); return (error); } bus_describe_intr(dev, pf->res, pf->tag, "aq"); pf->admvec = vector; ++vector; /* Now set up the stations */ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { int cpu_id = i; rid = vector + 1; txr = &que->txr; que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (que->res == NULL) { device_printf(dev, "Unable to allocate" " bus resource: que interrupt [rid=%d]\n", rid); return (ENXIO); } /* Set the handler function */ error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_msix_que, que, &que->tag); if (error) { que->res = NULL; device_printf(dev, "Failed to register que handler"); return (error); } bus_describe_intr(dev, que->res, que->tag, "que%d", i); /* Bind the vector to a CPU */ #ifdef RSS cpu_id = rss_getcpu(i % rss_getnumbuckets()); #endif bus_bind_intr(dev, que->res, cpu_id); que->msix = vector; } return (0); } /* * Allocate MSI/X vectors */ static int ixl_init_msix(struct ixl_pf *pf) { device_t dev = pf->dev; int rid, want, vectors, queues, available; /* Override by tuneable */ if (ixl_enable_msix == 0) goto msi; /* ** When used in a virtualized environment ** PCI BUSMASTER capability may not be set ** so explicity set it here and rewrite ** the ENABLE in the MSIX control register ** at this point to cause the host to ** successfully initialize us. 
*/ { u16 pci_cmd_word; int msix_ctrl; pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); pci_cmd_word |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); pci_find_cap(dev, PCIY_MSIX, &rid); rid += PCIR_MSIX_CTRL; msix_ctrl = pci_read_config(dev, rid, 2); msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(dev, rid, msix_ctrl, 2); } /* First try MSI/X */ rid = PCIR_BAR(IXL_BAR); pf->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!pf->msix_mem) { /* May not be enabled */ device_printf(pf->dev, "Unable to map MSIX table\n"); goto msi; } available = pci_msix_count(dev); if (available == 0) { /* system has msix disabled */ bus_release_resource(dev, SYS_RES_MEMORY, rid, pf->msix_mem); pf->msix_mem = NULL; goto msi; } /* Figure out a reasonable auto config value */ queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus; /* Override with hardcoded value if it's less than autoconfig count */ if ((ixl_max_queues != 0) && (ixl_max_queues <= queues)) queues = ixl_max_queues; else if ((ixl_max_queues != 0) && (ixl_max_queues > queues)) device_printf(dev, "ixl_max_queues > # of cpus, using " "autoconfig amount...\n"); /* Or limit maximum auto-configured queues to 8 */ else if ((ixl_max_queues == 0) && (queues > 8)) queues = 8; #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif /* ** Want one vector (RX/TX pair) per queue ** plus an additional for the admin queue. */ want = queues + 1; if (want <= available) /* Have enough */ vectors = want; else { device_printf(pf->dev, "MSIX Configuration Problem, " "%d vectors available but %d wanted!\n", available, want); return (0); /* Will go to Legacy setup */ } if (pci_alloc_msix(dev, &vectors) == 0) { device_printf(pf->dev, "Using MSIX interrupts with %d vectors\n", vectors); pf->msix = vectors; pf->vsi.num_queues = queues; #ifdef RSS /* * If we're doing RSS, the number of queues needs to * match the number of RSS buckets that are configured. * * + If there's more queues than RSS buckets, we'll end * up with queues that get no traffic. * * + If there's more RSS buckets than queues, we'll end * up having multiple RSS buckets map to the same queue, * so there'll be some contention. */ if (queues != rss_getnumbuckets()) { device_printf(dev, "%s: queues (%d) != RSS buckets (%d)" "; performance will be impacted.\n", __func__, queues, rss_getnumbuckets()); } #endif return (vectors); } msi: vectors = pci_msi_count(dev); pf->vsi.num_queues = 1; pf->msix = 1; ixl_max_queues = 1; ixl_enable_msix = 0; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) device_printf(pf->dev, "Using an MSI interrupt\n"); else { pf->msix = 0; device_printf(pf->dev, "Using a Legacy interrupt\n"); } return (vectors); } /* * Plumb MSIX vectors */ static void ixl_configure_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; u32 reg; u16 vector = 1; /* First set up the adminq - vector 0 */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ rd32(hw, I40E_PFINT_ICR0); /* read to clear */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* * 0x7FF is the end of the queue list. * This means we won't use MSI-X vector 0 for a queue interrupt * in MSIX mode. 
*/ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); wr32(hw, I40E_PFINT_STAT_CTL0, 0); /* Next configure the queues */ for (int i = 0; i < vsi->num_queues; i++, vector++) { wr32(hw, I40E_PFINT_DYN_CTLN(i), i); wr32(hw, I40E_PFINT_LNKLSTN(i), i); reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(i), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); if (i == (vsi->num_queues - 1)) reg |= (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(i), reg); } } /* * Configure for MSI single vector operation */ static void ixl_configure_legacy(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg; wr32(hw, I40E_PFINT_ITR0(0), 0); wr32(hw, I40E_PFINT_ITR0(1), 0); /* Setup "other" causes */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_GPIO_MASK | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK ; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* SW_ITR_IDX = 0, but don't change INTENA */ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK); /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */ wr32(hw, I40E_PFINT_STAT_CTL0, 0); /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); /* Associate the queue pair to the vector and enable the q int */ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(0), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(0), reg); /* Next enable the queue pair */ reg = rd32(hw, I40E_QTX_ENA(0)); reg |= I40E_QTX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QTX_ENA(0), reg); reg = rd32(hw, I40E_QRX_ENA(0)); reg |= I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(0), reg); } /* * Set the Initial ITR state */ static void ixl_configure_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; vsi->rx_itr_setting = ixl_rx_itr; if (ixl_dynamic_rx_itr) vsi->rx_itr_setting |= IXL_ITR_DYNAMIC; vsi->tx_itr_setting = ixl_tx_itr; if (ixl_dynamic_tx_itr) vsi->tx_itr_setting |= IXL_ITR_DYNAMIC; for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; rxr->latency = IXL_AVE_LATENCY; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; txr->latency = IXL_AVE_LATENCY; } } static int ixl_allocate_pci_resources(struct ixl_pf *pf) { int rid; device_t dev = pf->dev; rid = PCIR_BAR(0); 
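/* Note (added): BAR 0 is the device register space; the bus-space tag/handle saved below are assumed to be what the driver's rd32()/wr32() register accessors use. */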
pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(pf->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } pf->osdep.mem_bus_space_tag = rman_get_bustag(pf->pci_mem); pf->osdep.mem_bus_space_handle = rman_get_bushandle(pf->pci_mem); pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); pf->osdep.flush_reg = I40E_GLGEN_STAT; pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; pf->hw.back = &pf->osdep; /* ** Now setup MSI or MSI/X, should ** return us the number of supported ** vectors. (Will be 1 for MSI) */ pf->msix = ixl_init_msix(pf); return (0); } static void ixl_free_interrupt_resources(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; device_t dev = pf->dev; int rid; /* We may get here before stations are setup */ if ((!ixl_enable_msix) || (que == NULL)) goto early; /* ** Release all msix VSI resources: */ for (int i = 0; i < vsi->num_queues; i++, que++) { rid = que->msix + 1; if (que->tag != NULL) { bus_teardown_intr(dev, que->res, que->tag); que->tag = NULL; } if (que->res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); que->res = NULL; } } early: /* Clean the AdminQ interrupt last */ if (pf->admvec) /* we are doing MSIX */ rid = pf->admvec + 1; else (pf->msix != 0) ? (rid = 1):(rid = 0); if (pf->tag != NULL) { bus_teardown_intr(dev, pf->res, pf->tag); pf->tag = NULL; } if (pf->res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res); pf->res = NULL; } } static void ixl_free_pci_resources(struct ixl_pf *pf) { device_t dev = pf->dev; int memrid; ixl_free_interrupt_resources(pf); if (pf->msix) pci_release_msi(dev); memrid = PCIR_BAR(IXL_BAR); if (pf->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, memrid, pf->msix_mem); if (pf->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), pf->pci_mem); return; } static void ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type) { /* Display supported media types */ if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_XAUI) || phy_type & (1 << I40E_PHY_TYPE_XFI) || phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) || phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) || phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) || phy_type & (1 << I40E_PHY_TYPE_XLAUI) || phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL); #ifndef IFM_ETH_XTYPE if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX)) 
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) || phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) || phy_type & (1 << I40E_PHY_TYPE_SFI)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_XLPPI)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL); #else if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_SFI)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_XLPPI)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); #endif } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static int ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) { struct ifnet *ifp; struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; struct i40e_aq_get_phy_abilities_resp abilities; enum i40e_status_code aq_error = 0; INIT_DEBUGOUT("ixl_setup_interface: begin"); ifp = vsi->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_baudrate = IF_Gbps(40); ifp->if_init = ixl_init; ifp->if_softc = vsi; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixl_ioctl; #if __FreeBSD_version >= 1100036 if_setgetcounterfn(ifp, ixl_get_counter); #endif ifp->if_transmit = ixl_mq_start; ifp->if_qflush = ixl_qflush; ifp->if_snd.ifq_maxlen = que->num_desc - 2; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; /* * Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; ifp->if_capabilities |= IFCAP_TSO; ifp->if_capabilities |= IFCAP_JUMBO_MTU; ifp->if_capabilities |= IFCAP_LRO; /* VLAN capabilties */ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; ifp->if_capenable = ifp->if_capabilities; /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. 
lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the ixl driver you can ** enable this and get full hardware tag filtering. */ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change, ixl_media_status); aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities, NULL); /* May need delay to detect fiber correctly */ if (aq_error == I40E_ERR_UNKNOWN_PHY) { i40e_msec_delay(200); aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities, NULL); } if (aq_error) { if (aq_error == I40E_ERR_UNKNOWN_PHY) device_printf(dev, "Unknown PHY type detected!\n"); else device_printf(dev, "Error getting supported media types, err %d," " AQ error %d\n", aq_error, hw->aq.asq_last_status); return (0); } ixl_add_ifmedia(vsi, abilities.phy_type); /* Use autoselect media by default */ ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO); ether_ifattach(ifp, hw->mac.addr); return (0); } /* ** Run when the Admin Queue gets a link state change interrupt. */ static void ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; /* Firmware workaround: may need to wait for link to actually come up... */ if (!pf->link_up && (status->link_info & I40E_AQ_SIGNAL_DETECT)) { device_printf(dev, "%s: Waiting...\n", __func__); i40e_msec_delay(4000); } /* Request link status from adapter */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); /* Print out message if an unqualified module is found */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP))) device_printf(dev, "Link failed because " "an unqualified module was detected!\n"); /* Update OS link info */ ixl_update_link_status(pf); } /********************************************************************* * * Get Firmware Switch configuration * - this will need to be more robust when more complex * switch configurations are enabled. 
* **********************************************************************/ static int ixl_switch_config(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = vsi->dev; struct i40e_aqc_get_switch_config_resp *sw_config; u8 aq_buf[I40E_AQ_LARGE_BUF]; int ret; u16 next = 0; memset(&aq_buf, 0, sizeof(aq_buf)); sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; ret = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (ret) { device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n", ret); return (ret); } #ifdef IXL_DEBUG device_printf(dev, "Switch config: header reported: %d in structure, %d total\n", sw_config->header.num_reported, sw_config->header.num_total); for (int i = 0; i < sw_config->header.num_reported; i++) { device_printf(dev, "%d: type=%d seid=%d uplink=%d downlink=%d\n", i, sw_config->element[i].element_type, sw_config->element[i].seid, sw_config->element[i].uplink_seid, sw_config->element[i].downlink_seid); } #endif /* Simplified due to a single VSI at the moment */ vsi->uplink_seid = sw_config->element[0].uplink_seid; vsi->downlink_seid = sw_config->element[0].downlink_seid; vsi->seid = sw_config->element[0].seid; return (ret); } /********************************************************************* * * Initialize the VSI: this handles contexts, which means things * like the number of descriptors, buffer size, * plus we init the rings thru this function. * **********************************************************************/ static int ixl_initialize_vsi(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; struct i40e_hw *hw = vsi->hw; struct i40e_vsi_context ctxt; int err = 0; memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = vsi->seid; if (pf->veb_seid != 0) ctxt.uplink_seid = pf->veb_seid; ctxt.pf_num = hw->pf_id; err = i40e_aq_get_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d\n", err); return (err); } #ifdef IXL_DEBUG device_printf(dev, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, " "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, " "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid, ctxt.uplink_seid, ctxt.vsi_number, ctxt.vsis_allocated, ctxt.vsis_unallocated, ctxt.flags, ctxt.pf_num, ctxt.vf_num, ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits); #endif /* ** Set the queue and traffic class bits ** - when multiple traffic classes are supported ** this will need to be more robust. 
*/ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG; ctxt.info.queue_mapping[0] = 0; ctxt.info.tc_mapping[0] = 0x0c00; /* Set VLAN receive stripping mode */ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID; ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL; if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING) ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; else ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING; /* Keep copy of VSI info in VSI for statistic counters */ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); /* Reset VSI statistics */ ixl_vsi_reset_stats(vsi); vsi->hw_filters_add = 0; vsi->hw_filters_del = 0; ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF); err = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d, aq_error %d\n", err, hw->aq.asq_last_status); return (err); } for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; struct i40e_hmc_obj_txq tctx; struct i40e_hmc_obj_rxq rctx; u32 txctl; u16 size; /* Setup the HMC TX Context */ size = que->num_desc * sizeof(struct i40e_tx_desc); memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq)); tctx.new_context = 1; tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS); tctx.qlen = que->num_desc; tctx.fc_ena = 0; tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */ /* Enable HEAD writeback */ tctx.head_wb_ena = 1; tctx.head_wb_addr = txr->dma.pa + (que->num_desc * sizeof(struct i40e_tx_desc)); tctx.rdylist_act = 0; err = i40e_clear_lan_tx_queue_context(hw, i); if (err) { device_printf(dev, "Unable to clear TX context\n"); break; } err = i40e_set_lan_tx_queue_context(hw, i, &tctx); if (err) { device_printf(dev, "Unable to set TX context\n"); break; } /* Associate the ring with this PF */ txctl = I40E_QTX_CTL_PF_QUEUE; txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(i), txctl); ixl_flush(hw); /* Do ring (re)init */ ixl_init_tx_ring(que); /* Next setup the HMC RX Context */ if (vsi->max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len; /* Set up an RX context for the HMC */ memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq)); rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; /* ignore header split for now */ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? 
vsi->max_frame_size : max_rxmax; rctx.dtype = 0; rctx.dsize = 1; /* do 32byte descriptors */ rctx.hsplit_0 = 0; /* no HDR split initially */ rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS); rctx.qlen = que->num_desc; rctx.tphrdesc_ena = 1; rctx.tphwdesc_ena = 1; rctx.tphdata_ena = 0; rctx.tphhead_ena = 0; rctx.lrxqthresh = 2; rctx.crcstrip = 1; rctx.l2tsel = 1; rctx.showiv = 1; rctx.fc_ena = 0; rctx.prefena = 1; err = i40e_clear_lan_rx_queue_context(hw, i); if (err) { device_printf(dev, "Unable to clear RX context %d\n", i); break; } err = i40e_set_lan_rx_queue_context(hw, i, &rctx); if (err) { device_printf(dev, "Unable to set RX context %d\n", i); break; } err = ixl_init_rx_ring(que); if (err) { device_printf(dev, "Fail in init_rx_ring %d\n", i); break; } wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0); #ifdef DEV_NETMAP /* preserve queue */ if (vsi->ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(vsi->ifp); struct netmap_kring *kring = &na->rx_rings[i]; int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); wr32(vsi->hw, I40E_QRX_TAIL(que->me), t); } else #endif /* DEV_NETMAP */ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1); } return (err); } /********************************************************************* * * Free all VSI structs. * **********************************************************************/ void ixl_free_vsi(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ixl_queue *que = vsi->queues; /* Free station queues */ if (!vsi->queues) goto free_filters; for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; if (!mtx_initialized(&txr->mtx)) /* uninitialized */ continue; IXL_TX_LOCK(txr); ixl_free_que_tx(que); if (txr->base) i40e_free_dma_mem(&pf->hw, &txr->dma); IXL_TX_UNLOCK(txr); IXL_TX_LOCK_DESTROY(txr); if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ continue; IXL_RX_LOCK(rxr); ixl_free_que_rx(que); if (rxr->base) i40e_free_dma_mem(&pf->hw, &rxr->dma); IXL_RX_UNLOCK(rxr); IXL_RX_LOCK_DESTROY(rxr); } free(vsi->queues, M_DEVBUF); free_filters: /* Free VSI filter list */ ixl_free_mac_filters(vsi); } static void ixl_free_mac_filters(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; while (!SLIST_EMPTY(&vsi->ftl)) { f = SLIST_FIRST(&vsi->ftl); SLIST_REMOVE_HEAD(&vsi->ftl, next); free(f, M_DEVBUF); } } /********************************************************************* * * Allocate memory for the VSI (virtual station interface) and their * associated queues, rings and the descriptors associated with each, * called only once at attach. 
* **********************************************************************/ static int ixl_setup_stations(struct ixl_pf *pf) { device_t dev = pf->dev; struct ixl_vsi *vsi; struct ixl_queue *que; struct tx_ring *txr; struct rx_ring *rxr; int rsize, tsize; int error = I40E_SUCCESS; vsi = &pf->vsi; vsi->back = (void *)pf; vsi->hw = &pf->hw; vsi->id = 0; vsi->num_vlans = 0; vsi->back = pf; /* Get memory for the station queues */ if (!(vsi->queues = (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); error = ENOMEM; goto early; } for (int i = 0; i < vsi->num_queues; i++) { que = &vsi->queues[i]; que->num_desc = ixl_ringsz; que->me = i; que->vsi = vsi; /* mark the queue as active */ vsi->active_queues |= (u64)1 << que->me; txr = &que->txr; txr->que = que; txr->tail = I40E_QTX_TAIL(que->me); /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), que->me); mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); /* Create the TX descriptor ring */ tsize = roundup2((que->num_desc * sizeof(struct i40e_tx_desc)) + sizeof(u32), DBA_ALIGN); if (i40e_allocate_dma_mem(&pf->hw, &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto fail; } txr->base = (struct i40e_tx_desc *)txr->dma.va; bzero((void *)txr->base, tsize); /* Now allocate transmit soft structs for the ring */ if (ixl_allocate_tx_data(que)) { device_printf(dev, "Critical Failure setting up TX structures\n"); error = ENOMEM; goto fail; } /* Allocate a buf ring */ txr->br = buf_ring_alloc(4096, M_DEVBUF, M_NOWAIT, &txr->mtx); if (txr->br == NULL) { device_printf(dev, "Critical Failure setting up TX buf ring\n"); error = ENOMEM; goto fail; } /* * Next the RX queues... */ rsize = roundup2(que->num_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); rxr = &que->rxr; rxr->que = que; rxr->tail = I40E_QRX_TAIL(que->me); /* Initialize the RX side lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), que->me); mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); if (i40e_allocate_dma_mem(&pf->hw, &rxr->dma, i40e_mem_reserved, rsize, 4096)) { device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; goto fail; } rxr->base = (union i40e_rx_desc *)rxr->dma.va; bzero((void *)rxr->base, rsize); /* Allocate receive soft structs for the ring*/ if (ixl_allocate_rx_data(que)) { device_printf(dev, "Critical Failure setting up receive structs\n"); error = ENOMEM; goto fail; } } return (0); fail: for (int i = 0; i < vsi->num_queues; i++) { que = &vsi->queues[i]; rxr = &que->rxr; txr = &que->txr; if (rxr->base) i40e_free_dma_mem(&pf->hw, &rxr->dma); if (txr->base) i40e_free_dma_mem(&pf->hw, &txr->dma); } early: return (error); } /* ** Provide a update to the queue RX ** interrupt moderation value. 
*/ static void ixl_set_queue_rx_itr(struct ixl_queue *que) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; u16 rx_latency = 0; int rx_bytes; /* Idle, do nothing */ if (rxr->bytes == 0) return; if (ixl_dynamic_rx_itr) { rx_bytes = rxr->bytes/rxr->itr; rx_itr = rxr->itr; /* Adjust latency range */ switch (rxr->latency) { case IXL_LOW_LATENCY: if (rx_bytes > 10) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (rx_bytes > 20) { rx_latency = IXL_BULK_LATENCY; rx_itr = IXL_ITR_8K; } else if (rx_bytes <= 10) { rx_latency = IXL_LOW_LATENCY; rx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (rx_bytes <= 20) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; } rxr->latency = rx_latency; if (rx_itr != rxr->itr) { /* do an exponential smoothing */ rx_itr = (10 * rx_itr * rxr->itr) / ((9 * rx_itr) + rxr->itr); rxr->itr = rx_itr & IXL_MAX_ITR; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) vsi->rx_itr_setting = ixl_rx_itr; /* Update the hardware if needed */ if (rxr->itr != vsi->rx_itr_setting) { rxr->itr = vsi->rx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr); } } rxr->bytes = 0; rxr->packets = 0; return; } /* ** Provide a update to the queue TX ** interrupt moderation value. */ static void ixl_set_queue_tx_itr(struct ixl_queue *que) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; int tx_bytes; /* Idle, do nothing */ if (txr->bytes == 0) return; if (ixl_dynamic_tx_itr) { tx_bytes = txr->bytes/txr->itr; tx_itr = txr->itr; switch (txr->latency) { case IXL_LOW_LATENCY: if (tx_bytes > 10) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (tx_bytes > 20) { tx_latency = IXL_BULK_LATENCY; tx_itr = IXL_ITR_8K; } else if (tx_bytes <= 10) { tx_latency = IXL_LOW_LATENCY; tx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (tx_bytes <= 20) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; } txr->latency = tx_latency; if (tx_itr != txr->itr) { /* do an exponential smoothing */ tx_itr = (10 * tx_itr * txr->itr) / ((9 * tx_itr) + txr->itr); txr->itr = tx_itr & IXL_MAX_ITR; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) vsi->tx_itr_setting = ixl_tx_itr; /* Update the hardware if needed */ if (txr->itr != vsi->tx_itr_setting) { txr->itr = vsi->tx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } } txr->bytes = 0; txr->packets = 0; return; } #define QUEUE_NAME_LEN 32 static void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name) { struct sysctl_oid *tree; struct sysctl_oid_list *child; struct sysctl_oid_list *vsi_list; tree = device_get_sysctl_tree(pf->dev); child = SYSCTL_CHILDREN(tree); vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name, CTLFLAG_RD, NULL, "VSI Number"); vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats); } static void ixl_add_hw_stats(struct ixl_pf *pf) { device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *queues = vsi->queues; struct i40e_hw_port_stats *pf_stats = &pf->stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 
struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct sysctl_oid_list *vsi_list; struct sysctl_oid *queue_node; struct sysctl_oid_list *queue_list; struct tx_ring *txr; struct rx_ring *rxr; char queue_namebuf[QUEUE_NAME_LEN]; /* Driver statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &pf->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq", CTLFLAG_RD, &pf->admin_irq, "Admin Queue IRQ Handled"); ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf"); vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node); /* Queue statistics */ for (int q = 0; q < vsi->num_queues; q++) { snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #"); queue_list = SYSCTL_CHILDREN(queue_node); txr = &(queues[q].txr); rxr = &(queues[q].rxr); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), "m_defrag() failed"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped", CTLFLAG_RD, &(queues[q].dropped_pkts), "Driver dropped packets"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(queues[q].irqs), "irqs on this queue"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &(queues[q].tso), "TSO"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup", CTLFLAG_RD, &(queues[q].tx_dma_setup), "Driver tx dma failure in xmit"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &(txr->no_desc), "Queue No Descriptor Available"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &(txr->total_packets), "Queue Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &(txr->tx_bytes), "Queue Bytes Transmitted"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), "Queue Packets Received"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &(rxr->rx_bytes), "Queue Bytes Received"); } /* MAC stats */ ixl_add_sysctls_mac_stats(ctx, child, pf_stats); } static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_eth_stats *eth_stats) { struct ixl_sysctl_info ctls[] = { {ð_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, {ð_stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received"}, {ð_stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received"}, {ð_stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received"}, {ð_stats->rx_discards, "rx_discards", "Discarded RX packets"}, {ð_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"}, {ð_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, {ð_stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted"}, {ð_stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted"}, // end {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != NULL) { SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } } static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_hw_port_stats *stats) { struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", CTLFLAG_RD, NULL, "Mac Statistics"); struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); struct i40e_eth_stats *eth_stats = &stats->eth; 
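/*
 * The ctls[] table below follows the driver's usual table-driven pattern:
 * each entry pairs a counter in i40e_hw_port_stats with the sysctl leaf name
 * and description it is exported under, and the loop at the end registers one
 * read-only UQUAD per entry beneath the "mac" node (for a hypothetical first
 * unit, something like dev.ixl.0.mac.crc_errors).
 */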
ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); struct ixl_sysctl_info ctls[] = { {&stats->crc_errors, "crc_errors", "CRC Errors"}, {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, /* Packet Reception Stats */ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, /* Packet Transmission Stats */ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, /* Flow control */ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, /* End */ {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != NULL) { SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } } /* ** ixl_config_rss - setup RSS ** - note this is done for the single vsi */ static void ixl_config_rss(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; u32 lut = 0; u64 set_hena = 0, hena; int i, j, que_id; #ifdef RSS u32 rss_hash_config; u32 rss_seed[IXL_KEYSZ]; #else u32 rss_seed[IXL_KEYSZ] = {0x41b01687, 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1}; #endif #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #endif /* Fill out hash function seed */ for (i = 0; i < IXL_KEYSZ; i++) wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]); /* Enable PCTYPES for RSS: */ #ifdef RSS rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); #else set_hena = ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD); #endif hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); hena |= set_hena; wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. of queues in round robin fashion */ for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) { if (j == vsi->num_queues) j = 0; #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) */ que_id = rss_get_indirection_to_bucket(i); que_id = que_id % vsi->num_queues; #else que_id = j; #endif /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (que_id & ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); /* On i = 3, we have 4 entries in lut; write to the register */ if ((i & 3) == 3) wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); } ixl_flush(hw); } /* ** This routine is run via an vlan config EVENT, ** it enables us to use the HW Filter table since ** we can get the vlan id. This just creates the ** entry in the soft version of the VFTA, init will ** repopulate the real table. */ static void ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = ifp->if_softc; struct i40e_hw *hw = vsi->hw; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IXL_PF_LOCK(pf); ++vsi->num_vlans; ixl_add_filter(vsi, hw->mac.addr, vtag); IXL_PF_UNLOCK(pf); } /* ** This routine is run via an vlan ** unconfig EVENT, remove our entry ** in the soft vfta. */ static void ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = ifp->if_softc; struct i40e_hw *hw = vsi->hw; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IXL_PF_LOCK(pf); --vsi->num_vlans; ixl_del_filter(vsi, hw->mac.addr, vtag); IXL_PF_UNLOCK(pf); } /* ** This routine updates vlan filters, called by init ** it scans the filter table and then updates the hw ** after a soft reset. */ static void ixl_setup_vlan_filters(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; int cnt = 0, flags; if (vsi->num_vlans == 0) return; /* ** Scan the filter list for vlan entries, ** mark them for addition and then call ** for the AQ update. 
*/ SLIST_FOREACH(f, &vsi->ftl, next) { if (f->flags & IXL_FILTER_VLAN) { f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); cnt++; } } if (cnt == 0) { printf("setup vlan: no filters found!\n"); return; } flags = IXL_FILTER_VLAN; flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); ixl_add_hw_filters(vsi, flags, cnt); return; } /* ** Initialize filter list and add filters that the hardware ** needs to know about. */ static void ixl_init_filters(struct ixl_vsi *vsi) { /* Add broadcast address */ ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY); } /* ** This routine adds mulicast filters */ static void ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) { struct ixl_mac_filter *f; /* Does one already exist */ f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (f != NULL) return; f = ixl_get_filter(vsi); if (f == NULL) { printf("WARNING: no filter available!!\n"); return; } bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->vlan = IXL_VLAN_ANY; f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); return; } static void ixl_reconfigure_filters(struct ixl_vsi *vsi) { ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs); } /* ** This routine adds macvlan filters */ static void ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f, *tmp; struct ixl_pf *pf; device_t dev; DEBUGOUT("ixl_add_filter: begin"); pf = vsi->back; dev = pf->dev; /* Does one already exist */ f = ixl_find_filter(vsi, macaddr, vlan); if (f != NULL) return; /* ** Is this the first vlan being registered, if so we ** need to remove the ANY filter that indicates we are ** not in a vlan, and replace that with a 0 filter. */ if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (tmp != NULL) { ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); ixl_add_filter(vsi, macaddr, 0); } } f = ixl_get_filter(vsi); if (f == NULL) { device_printf(dev, "WARNING: no filter available!!\n"); return; } bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->vlan = vlan; f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); if (f->vlan != IXL_VLAN_ANY) f->flags |= IXL_FILTER_VLAN; else vsi->num_macs++; ixl_add_hw_filters(vsi, f->flags, 1); return; } static void ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; f = ixl_find_filter(vsi, macaddr, vlan); if (f == NULL) return; f->flags |= IXL_FILTER_DEL; ixl_del_hw_filters(vsi, 1); vsi->num_macs--; /* Check if this is the last vlan removal */ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { /* Switch back to a non-vlan filter */ ixl_del_filter(vsi, macaddr, 0); ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); } return; } /* ** Find the filter with both matching mac addr and vlan id */ static struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; bool match = FALSE; SLIST_FOREACH(f, &vsi->ftl, next) { if (!cmp_etheraddr(f->macaddr, macaddr)) continue; if (f->vlan == vlan) { match = TRUE; break; } } if (!match) f = NULL; return (f); } /* ** This routine takes additions to the vsi filter ** table and creates an Admin Queue call to create ** the filters in the hardware. 
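** Each admin queue element carries one MAC/VLAN pair: filters marked
** IXL_VLAN_ANY are sent with the IGNORE_VLAN flag instead of a tag, every
** element requests a perfect match, and at most cnt entries are packed into
** a single i40e_aq_add_macvlan() call.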
*/ static void ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) { struct i40e_aqc_add_macvlan_element_data *a, *b; struct ixl_mac_filter *f; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; int err, j = 0; pf = vsi->back; dev = pf->dev; hw = &pf->hw; IXL_PF_LOCK_ASSERT(pf); a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (a == NULL) { device_printf(dev, "add_hw_filters failed to get memory\n"); return; } /* ** Scan the filter list, each time we find one ** we add it to the admin queue array and turn off ** the add bit. */ SLIST_FOREACH(f, &vsi->ftl, next) { if (f->flags == flags) { b = &a[j]; // a pox on fvl long names :) bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); if (f->vlan == IXL_VLAN_ANY) { b->vlan_tag = 0; b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { b->vlan_tag = f->vlan; b->flags = 0; } b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; f->flags &= ~IXL_FILTER_ADD; j++; } if (j == cnt) break; } if (j > 0) { err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); if (err) device_printf(dev, "aq_add_macvlan err %d, " "aq_error %d\n", err, hw->aq.asq_last_status); else vsi->hw_filters_add += j; } free(a, M_DEVBUF); return; } /* ** This routine takes removals in the vsi filter ** table and creates an Admin Queue call to delete ** the filters in the hardware. */ static void ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) { struct i40e_aqc_remove_macvlan_element_data *d, *e; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; struct ixl_mac_filter *f, *f_temp; int err, j = 0; DEBUGOUT("ixl_del_hw_filters: begin\n"); pf = vsi->back; hw = &pf->hw; dev = pf->dev; d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { printf("del hw filter failed to get memory\n"); return; } SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { if (f->flags & IXL_FILTER_DEL) { e = &d[j]; // a pox on fvl long names :) bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan); e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; /* delete entry from vsi list */ SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); free(f, M_DEVBUF); j++; } if (j == cnt) break; } if (j > 0) { err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); /* NOTE: returns ENOENT every time but seems to work fine, so we'll ignore that specific error. */ // TODO: Does this still occur on current firmwares? 
if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) { int sc = 0; for (int i = 0; i < j; i++) sc += (!d[i].error_code); vsi->hw_filters_del += sc; device_printf(dev, "Failed to remove %d/%d filters, aq error %d\n", j - sc, j, hw->aq.asq_last_status); } else vsi->hw_filters_del += j; } free(d, M_DEVBUF); DEBUGOUT("ixl_del_hw_filters: end\n"); return; } static int ixl_enable_rings(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int index, error; u32 reg; error = 0; for (int i = 0; i < vsi->num_queues; i++) { index = vsi->first_queue + i; i40e_pre_tx_queue_cfg(hw, index, TRUE); reg = rd32(hw, I40E_QTX_ENA(index)); reg |= I40E_QTX_ENA_QENA_REQ_MASK | I40E_QTX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QTX_ENA(index), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(index)); if (reg & I40E_QTX_ENA_QENA_STAT_MASK) break; i40e_msec_delay(10); } if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "TX queue %d disabled!\n", index); error = ETIMEDOUT; } reg = rd32(hw, I40E_QRX_ENA(index)); reg |= I40E_QRX_ENA_QENA_REQ_MASK | I40E_QRX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QRX_ENA(index), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(index)); if (reg & I40E_QRX_ENA_QENA_STAT_MASK) break; i40e_msec_delay(10); } if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "RX queue %d disabled!\n", index); error = ETIMEDOUT; } } return (error); } static int ixl_disable_rings(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int index, error; u32 reg; error = 0; for (int i = 0; i < vsi->num_queues; i++) { index = vsi->first_queue + i; i40e_pre_tx_queue_cfg(hw, index, FALSE); i40e_usec_delay(500); reg = rd32(hw, I40E_QTX_ENA(index)); reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QTX_ENA(index), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(index)); if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "TX queue %d still enabled!\n", index); error = ETIMEDOUT; } reg = rd32(hw, I40E_QRX_ENA(index)); reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(index), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(index)); if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "RX queue %d still enabled!\n", index); error = ETIMEDOUT; } } return (error); } /** * ixl_handle_mdd_event * * Called from interrupt handler to identify possibly malicious vfs * (But also detects events from the PF, as well) **/ static void ixl_handle_mdd_event(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; bool mdd_detected = false; bool pf_mdd_detected = false; u32 reg; /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; device_printf(dev, "Malicious Driver Detection event 0x%02x" " on TX queue %d pf number 0x%02x\n", event, queue, pf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 
func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; device_printf(dev, "Malicious Driver Detection event 0x%02x" " on RX queue %d of function 0x%02x\n", event, queue, func); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } if (mdd_detected) { reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); device_printf(dev, "MDD TX event is for this function 0x%08x", reg); pf_mdd_detected = true; } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); device_printf(dev, "MDD RX event is for this function 0x%08x", reg); pf_mdd_detected = true; } } /* re-enable mdd interrupt cause */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); ixl_flush(hw); } static void ixl_enable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; if (ixl_enable_msix) { ixl_enable_adminq(hw); for (int i = 0; i < vsi->num_queues; i++, que++) ixl_enable_queue(hw, que->me); } else ixl_enable_legacy(hw); } static void ixl_disable_rings_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; for (int i = 0; i < vsi->num_queues; i++, que++) ixl_disable_queue(hw, que->me); } static void ixl_disable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; if (ixl_enable_msix) ixl_disable_adminq(hw); else ixl_disable_legacy(hw); } static void ixl_enable_adminq(struct i40e_hw *hw) { u32 reg; reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, reg); ixl_flush(hw); } static void ixl_disable_adminq(struct i40e_hw *hw) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTL0, reg); ixl_flush(hw); } static void ixl_enable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } static void ixl_disable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } static void ixl_enable_legacy(struct i40e_hw *hw) { u32 reg; reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, reg); } static void ixl_disable_legacy(struct i40e_hw *hw) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTL0, reg); } static void ixl_update_stats_counters(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_vf *vf; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw_port_stats *osd = &pf->stats_offsets; /* Update hw stats */ ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_bytes, &nsd->eth.rx_bytes); ixl_stat_update48(hw, 
I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, &nsd->tx_dropped_link_down); ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, &osd->mac_remote_faults, &nsd->mac_remote_faults); ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); /* Flow control (LFC) stats */ ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); /* Packet size stats rx */ ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); /* Packet size stats tx */ ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); ixl_stat_update48(hw, 
I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); pf->stat_offsets_loaded = true; /* End hw stats */ /* Update vsi stats */ ixl_update_vsi_stats(vsi); for (int i = 0; i < pf->num_vfs; i++) { vf = &pf->vfs[i]; if (vf->vf_flags & VF_FLAG_ENABLED) ixl_update_eth_stats(&pf->vfs[i].vsi); } } /* ** Tasklet handler for MSIX Adminq interrupts ** - do outside interrupt since it might sleep */ static void ixl_do_adminq(void *context, int pending) { struct ixl_pf *pf = context; struct i40e_hw *hw = &pf->hw; struct i40e_arq_event_info event; i40e_status ret; device_t dev = pf->dev; u32 reg, loop = 0; u16 opcode, result; // XXX: Possibly inappropriate overload if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { int count = 0; // ERJ: Typically finishes within 3-4 seconds while (count++ < 100) { reg = rd32(hw, I40E_GLGEN_RSTAT); reg = reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK; if (reg) { i40e_msec_delay(100); } else { break; } } device_printf(dev, "EMPR reset wait count: %d\n", count); device_printf(dev, "Rebuilding HW structs...\n"); // XXX: I feel like this could cause a kernel panic some time in the future ixl_stop(pf); ixl_init(pf); atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); return; } // Actually do Admin Queue handling event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!event.msg_buf) { device_printf(dev, "%s: Unable to allocate memory for Admin" " Queue event!\n", __func__); return; } IXL_PF_LOCK(pf); /* clean and process any events */ do { ret = i40e_clean_arq_element(hw, &event, &result); if (ret) break; opcode = LE16_TO_CPU(event.desc.opcode); #ifdef IXL_DEBUG device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__, opcode); #endif switch (opcode) { case i40e_aqc_opc_get_link_status: ixl_link_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_pf: #ifdef PCI_IOV ixl_handle_vf_msg(pf, &event); #endif break; case i40e_aqc_opc_event_lan_overflow: default: break; } } while (result && (loop++ < IXL_ADM_LIMIT)); free(event.msg_buf, M_DEVBUF); /* * If there are still messages to process, reschedule ourselves. * Otherwise, re-enable our interrupt and go to sleep. 
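 * (Here "result" is the count of messages still pending as reported back by
 * i40e_clean_arq_element(), so a nonzero value re-queues this task on pf->tq
 * rather than re-enabling the admin queue interrupt.)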
*/ if (result > 0) taskqueue_enqueue(pf->tq, &pf->adminq); else ixl_enable_adminq(hw); IXL_PF_UNLOCK(pf); } static int ixl_debug_info(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf; int error, input = 0; error = sysctl_handle_int(oidp, &input, 0, req); if (error || !req->newptr) return (error); if (input == 1) { pf = (struct ixl_pf *)arg1; ixl_print_debug_info(pf); } return (error); } static void ixl_print_debug_info(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; struct rx_ring *rxr = &que->rxr; struct tx_ring *txr = &que->txr; u32 reg; printf("Queue irqs = %jx\n", (uintmax_t)que->irqs); printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq); printf("RX next check = %x\n", rxr->next_check); printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done); printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets); printf("TX desc avail = %x\n", txr->avail); reg = rd32(hw, I40E_GLV_GORCL(0xc)); printf("RX Bytes = %x\n", reg); reg = rd32(hw, I40E_GLPRT_GORCL(hw->port)); printf("Port RX Bytes = %x\n", reg); reg = rd32(hw, I40E_GLV_RDPC(0xc)); printf("RX discard = %x\n", reg); reg = rd32(hw, I40E_GLPRT_RDPC(hw->port)); printf("Port RX discard = %x\n", reg); reg = rd32(hw, I40E_GLV_TEPC(0xc)); printf("TX errors = %x\n", reg); reg = rd32(hw, I40E_GLV_GOTCL(0xc)); printf("TX Bytes = %x\n", reg); reg = rd32(hw, I40E_GLPRT_RUC(hw->port)); printf("RX undersize = %x\n", reg); reg = rd32(hw, I40E_GLPRT_RFC(hw->port)); printf("RX fragments = %x\n", reg); reg = rd32(hw, I40E_GLPRT_ROC(hw->port)); printf("RX oversize = %x\n", reg); reg = rd32(hw, I40E_GLPRT_RLEC(hw->port)); printf("RX length error = %x\n", reg); reg = rd32(hw, I40E_GLPRT_MRFC(hw->port)); printf("mac remote fault = %x\n", reg); reg = rd32(hw, I40E_GLPRT_MLFC(hw->port)); printf("mac local fault = %x\n", reg); } /** * Update VSI-specific ethernet statistics counters. 
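 *
 * The counters are read from the GLV_* register block indexed by this VSI's
 * stat_counter_idx, using the same offset scheme as the port-wide stats, so
 * values are reported relative to the first read rather than as raw hardware
 * counts.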
**/ void ixl_update_eth_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *es; struct i40e_eth_stats *oes; struct i40e_hw_port_stats *nsd; u16 stat_idx = vsi->info.stat_counter_idx; es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; nsd = &pf->stats; /* Gather up the stats that the hw collects */ ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; } static void ixl_update_vsi_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf; struct ifnet *ifp; struct i40e_eth_stats *es; u64 tx_discards; struct i40e_hw_port_stats *nsd; pf = vsi->back; ifp = vsi->ifp; es = &vsi->eth_stats; nsd = &pf->stats; ixl_update_eth_stats(vsi); tx_discards = es->tx_discards + nsd->tx_dropped_link_down; for (int i = 0; i < vsi->num_queues; i++) tx_discards += vsi->queues[i].txr.br->br_drops; /* Update ifnet stats */ IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast + es->rx_broadcast); IXL_SET_OPACKETS(vsi, es->tx_unicast + es->tx_multicast + es->tx_broadcast); IXL_SET_IBYTES(vsi, es->rx_bytes); IXL_SET_OBYTES(vsi, es->tx_bytes); IXL_SET_IMCASTS(vsi, es->rx_multicast); IXL_SET_OMCASTS(vsi, es->tx_multicast); IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + nsd->rx_jabber); IXL_SET_OERRORS(vsi, es->tx_errors); IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); IXL_SET_OQDROPS(vsi, tx_discards); IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); IXL_SET_COLLISIONS(vsi, 0); } /** * Reset all of the stats for the given pf **/ void ixl_pf_reset_stats(struct ixl_pf *pf) { bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); pf->stat_offsets_loaded = false; } /** * Resets all stats of the given vsi **/ void ixl_vsi_reset_stats(struct ixl_vsi *vsi) { bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); vsi->stat_offsets_loaded = false; } /** * Read and update a 48 bit stat from the hw * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. 
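 * The code below also handles 48-bit rollover: with illustrative values, if
 * the saved offset is 0xFFFFFFFFFFF0 and a later read returns 0x10, the
 * counter has wrapped, and (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20 is reported
 * instead of an underflowed difference.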
We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. **/ static void ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) new_data = rd64(hw, loreg); #else /* * Use two rd32's instead of one rd64; FreeBSD versions before * 10 don't support 8 byte bus reads/writes. */ new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; #endif if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = new_data - *offset; else *stat = (new_data + ((u64)1 << 48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } /** * Read and update a 32 bit stat from the hw **/ static void ixl_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; new_data = rd32(hw, reg); if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = (u32)(new_data - *offset); else *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); } static void ixl_add_device_sysctls(struct ixl_pf *pf) { device_t dev = pf->dev; /* Set up sysctls */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, - pf, 0, ixl_set_flowcntl, "I", "Flow Control"); + pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, - pf, 0, ixl_set_advertise, "I", "Advertised Speed"); + pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_current_speed, "A", "Current Port Speed"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rx_itr", CTLFLAG_RW, &ixl_rx_itr, IXL_ITR_8K, "RX ITR"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "tx_itr", CTLFLAG_RW, &ixl_tx_itr, IXL_ITR_4K, "TX ITR"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR"); #ifdef IXL_DEBUG_SYSCTL SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0, ixl_debug_info, "I", "Debug Information"); - /* Debug shared-code message level */ + /* Shared-code debug message level */ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug_mask", CTLFLAG_RW, &pf->hw.debug_mask, 0, "Debug Message Level"); - SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl, - 0, "PF/VF Virtual Channel debug level"); - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 
"link_status", CTLTYPE_STRING | CTLFLAG_RD, - pf, 0, ixl_sysctl_link_status, "A", "Current Link Status"); + pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); + +#ifdef PCI_IOV + SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl, + 0, "PF/VF Virtual Channel debug level"); #endif +#endif } /* ** Set flow control using sysctl: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS) { /* * TODO: ensure tx CRC by hardware should be enabled * if tx flow control is enabled. * ^ N/A for 40G ports */ struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int error = 0; enum i40e_status_code aq_error = 0; u8 fc_aq_err = 0; /* Get request */ error = sysctl_handle_int(oidp, &pf->fc, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->fc < 0 || pf->fc > 3) { device_printf(dev, "Invalid fc mode; valid modes are 0 through 3\n"); return (EINVAL); } /* ** Changing flow control mode currently does not work on ** 40GBASE-CR4 PHYs */ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4 || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) { device_printf(dev, "Changing flow control mode unsupported" " on 40GBase-CR4 media.\n"); return (ENODEV); } /* Set fc ability for port */ hw->fc.requested_mode = pf->fc; aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE); if (aq_error) { device_printf(dev, "%s: Error setting new fc mode %d; fc_err %#x\n", __func__, aq_error, fc_aq_err); return (EIO); } /* Get new link state */ i40e_msec_delay(250); hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); return (0); } static int ixl_current_speed(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; int error = 0, index = 0; char *speeds[] = { "Unknown", "100M", "1G", "10G", "40G", "20G" }; ixl_update_link_status(pf); switch (hw->phy.link_info.link_speed) { case I40E_LINK_SPEED_100MB: index = 1; break; case I40E_LINK_SPEED_1GB: index = 2; break; case I40E_LINK_SPEED_10GB: index = 3; break; case I40E_LINK_SPEED_40GB: index = 4; break; case I40E_LINK_SPEED_20GB: index = 5; break; case I40E_LINK_SPEED_UNKNOWN: default: index = 0; break; } error = sysctl_handle_string(oidp, speeds[index], strlen(speeds[index]), req); return (error); } static int ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; enum i40e_status_code aq_error = 0; /* Get current capability information */ aq_error = 
i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (aq_error) { device_printf(dev, "%s: Error getting phy capabilities %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EAGAIN); } /* Prepare new config */ bzero(&config, sizeof(config)); config.phy_type = abilities.phy_type; config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; /* Translate into aq cmd link_speed */ if (speeds & 0x8) config.link_speed |= I40E_LINK_SPEED_20GB; if (speeds & 0x4) config.link_speed |= I40E_LINK_SPEED_10GB; if (speeds & 0x2) config.link_speed |= I40E_LINK_SPEED_1GB; if (speeds & 0x1) config.link_speed |= I40E_LINK_SPEED_100MB; /* Do aq command & restart link */ aq_error = i40e_aq_set_phy_config(hw, &config, NULL); if (aq_error) { device_printf(dev, "%s: Error setting new phy config %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EAGAIN); } /* ** This seems a bit heavy handed, but we ** need to get a reinit on some devices */ IXL_PF_LOCK(pf); ixl_stop_locked(pf); ixl_init_locked(pf); IXL_PF_UNLOCK(pf); return (0); } /* ** Control link advertise speed: ** Flags: ** 0x1 - advertise 100 Mb ** 0x2 - advertise 1G ** 0x4 - advertise 10G ** 0x8 - advertise 20G ** ** Does not work on 40G devices. */ static int ixl_set_advertise(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_ls = 0; int error = 0; /* ** FW doesn't support changing advertised speed ** for 40G devices; speed is always 40G. */ if (i40e_is_40G_device(hw->device_id)) return (ENODEV); /* Read in new mode */ requested_ls = pf->advertised_speed; error = sysctl_handle_int(oidp, &requested_ls, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Check for sane value */ if (requested_ls < 0x1 || requested_ls > 0xE) { device_printf(dev, "Invalid advertised speed; " "valid modes are 0x1 through 0xE\n"); return (EINVAL); } /* Then check for validity based on adapter type */ switch (hw->device_id) { case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: if (requested_ls & 0x8) { device_printf(dev, "20Gbs speed not supported on this device.\n"); return (EINVAL); } break; case I40E_DEV_ID_20G_KR2: case I40E_DEV_ID_20G_KR2_A: if (requested_ls & 0x1) { device_printf(dev, "100Mbs speed not supported on this device.\n"); return (EINVAL); } break; default: if (requested_ls & ~0x6) { device_printf(dev, "Only 1/10Gbs speeds are supported on this device.\n"); return (EINVAL); } break; } /* Exit if no change */ if (pf->advertised_speed == requested_ls) return (0); error = ixl_set_advertised_speeds(pf, requested_ls); if (error) return (error); pf->advertised_speed = requested_ls; ixl_update_link_status(pf); return (0); } /* ** Get the width and transaction speed of ** the bus this adapter is plugged into. 
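** The decoded speed and width are printed by the routine below, and a
** warning is issued when the slot is at or below x8 and slower than 8.0GT/s,
** since the driver expects a x8 PCIe Gen3 slot for full performance.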
*/ static u16 ixl_get_bus_info(struct i40e_hw *hw, device_t dev) { u16 link; u32 offset; /* Get the PCI Express Capabilities offset */ pci_find_cap(dev, PCIY_EXPRESS, &offset); /* ...and read the Link Status Register */ link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); switch (link & I40E_PCI_LINK_WIDTH) { case I40E_PCI_LINK_WIDTH_1: hw->bus.width = i40e_bus_width_pcie_x1; break; case I40E_PCI_LINK_WIDTH_2: hw->bus.width = i40e_bus_width_pcie_x2; break; case I40E_PCI_LINK_WIDTH_4: hw->bus.width = i40e_bus_width_pcie_x4; break; case I40E_PCI_LINK_WIDTH_8: hw->bus.width = i40e_bus_width_pcie_x8; break; default: hw->bus.width = i40e_bus_width_unknown; break; } switch (link & I40E_PCI_LINK_SPEED) { case I40E_PCI_LINK_SPEED_2500: hw->bus.speed = i40e_bus_speed_2500; break; case I40E_PCI_LINK_SPEED_5000: hw->bus.speed = i40e_bus_speed_5000; break; case I40E_PCI_LINK_SPEED_8000: hw->bus.speed = i40e_bus_speed_8000; break; default: hw->bus.speed = i40e_bus_speed_unknown; break; } device_printf(dev,"PCI Express Bus: Speed %s %s\n", ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : ("Unknown")); if ((hw->bus.width <= i40e_bus_width_pcie_x8) && (hw->bus.speed < i40e_bus_speed_8000)) { device_printf(dev, "PCI-Express bandwidth available" " for this device\n may be insufficient for" " optimal performance.\n"); device_printf(dev, "For expected performance a x8 " "PCIE Gen3 slot is required.\n"); } return (link); } static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; char buf[32]; snprintf(buf, sizeof(buf), "f%d.%d a%d.%d n%02x.%02x e%08x", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> IXL_NVM_VERSION_HI_SHIFT, (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> IXL_NVM_VERSION_LO_SHIFT, hw->nvm.eetrack); return (sysctl_handle_string(oidp, buf, strlen(buf), req)); } static int ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) { struct i40e_hw *hw = &pf->hw; struct i40e_nvm_access *nvma; device_t dev = pf->dev; enum i40e_status_code status = 0; int perrno; DEBUGFUNC("ixl_handle_nvmupd_cmd"); if (ifd->ifd_len < sizeof(struct i40e_nvm_access) || ifd->ifd_data == NULL) { device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__); device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n", __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access)); device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data); return (EINVAL); } nvma = (struct i40e_nvm_access *)ifd->ifd_data; if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { int count = 0; while (count++ < 100) { i40e_msec_delay(100); if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) break; } // device_printf(dev, "ioctl EMPR reset wait count %d\n", count); } if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) { IXL_PF_LOCK(pf); status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); IXL_PF_UNLOCK(pf); } else { perrno = -EBUSY; } if (status) device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n", status, perrno); /* * -EPERM is actually ERESTART, which the kernel interprets as it needing * to run this ioctl again. So use -EACCES for -EPERM instead. 
*/ if (perrno == -EPERM) return (-EACCES); else return (perrno); } #ifdef IXL_DEBUG_SYSCTL static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; struct i40e_link_status link_status; char buf[512]; enum i40e_status_code aq_error = 0; aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL); if (aq_error) { printf("i40e_aq_get_link_info() error %d\n", aq_error); return (EPERM); } sprintf(buf, "\n" "PHY Type : %#04x\n" "Speed : %#04x\n" "Link info: %#04x\n" "AN info : %#04x\n" - "Ext info : %#04x", + "Ext info : %#04x\n" + "Max Frame: %d\n" + "Pacing : %#04x", link_status.phy_type, link_status.link_speed, link_status.link_info, link_status.an_info, - link_status.ext_info); + link_status.ext_info, link_status.max_frame_size, + link_status.pacing); return (sysctl_handle_string(oidp, buf, strlen(buf), req)); } static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; char buf[512]; enum i40e_status_code aq_error = 0; struct i40e_aq_get_phy_abilities_resp abilities; aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities, NULL); if (aq_error) { printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error); return (EPERM); } sprintf(buf, "\n" "PHY Type : %#010x\n" "Speed : %#04x\n" "Abilities: %#04x\n" "EEE cap : %#06x\n" "EEER reg : %#010x\n" "D3 Lpan : %#04x", abilities.phy_type, abilities.link_speed, abilities.abilities, abilities.eee_capability, abilities.eeer_val, abilities.d3_lpan); return (sysctl_handle_string(oidp, buf, strlen(buf), req)); } static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct ixl_vsi *vsi = &pf->vsi; struct ixl_mac_filter *f; char *buf, *buf_i; int error = 0; int ftl_len = 0; int ftl_counter = 0; int buf_len = 0; int entry_len = 42; SLIST_FOREACH(f, &vsi->ftl, next) { ftl_len++; } if (ftl_len < 1) { sysctl_handle_string(oidp, "(none)", 6, req); return (0); } buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2; buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT); sprintf(buf_i++, "\n"); SLIST_FOREACH(f, &vsi->ftl, next) { sprintf(buf_i, MAC_FORMAT ", vlan %4d, flags %#06x", MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); buf_i += entry_len; /* don't print '\n' for last entry */ if (++ftl_counter != ftl_len) { sprintf(buf_i, "\n"); buf_i++; } } error = sysctl_handle_string(oidp, buf, strlen(buf), req); if (error) printf("sysctl error: %d\n", error); free(buf, M_DEVBUF); return error; } #define IXL_SW_RES_SIZE 0x14 static int ixl_res_alloc_cmp(const void *a, const void *b) { const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; return ((int)one->resource_type - (int)two->resource_type); } /* * Longest string length: 25 */ static char * ixl_switch_res_type_string(u8 type) { static char * ixl_switch_res_type_strings[0x14] = { "VEB", "VSI", "Perfect Match MAC address", "S-tag", "(Reserved)", "Multicast hash entry", "Unicast hash entry", "VLAN", "VSI List entry", "(Reserved)", "VLAN Statistic Pool", "Mirror Rule", "Queue Set", "Inner VLAN Forward filter", "(Reserved)", "Inner MAC", "IP", "GRE/VN1 Key", "VN2 Key", "Tunneling Port" }; if (type < 0x14) return ixl_switch_res_type_strings[type]; else return "(Reserved)"; } static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct 
ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; u8 num_entries; struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(resp, sizeof(resp)); error = i40e_aq_get_switch_resource_alloc(hw, &num_entries, resp, IXL_SW_RES_SIZE, NULL); if (error) { device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n", __func__, error, hw->aq.asq_last_status); sbuf_delete(buf); return error; } /* Sort entries by type for display */ qsort(resp, num_entries, sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), &ixl_res_alloc_cmp); sbuf_cat(buf, "\n"); sbuf_printf(buf, "# of entries: %d\n", num_entries); sbuf_printf(buf, #if 0 "Type | Guaranteed | Total | Used | Un-allocated\n" " | (this) | (all) | (this) | (all) \n"); #endif " Type | Guaranteed | Total | Used | Un-allocated\n" " | (this) | (all) | (this) | (all) \n"); for (int i = 0; i < num_entries; i++) { sbuf_printf(buf, #if 0 "%#4x | %10d %5d %6d %12d", resp[i].resource_type, #endif "%25s | %10d %5d %6d %12d", ixl_switch_res_type_string(resp[i].resource_type), resp[i].guaranteed, resp[i].total, resp[i].used, resp[i].total_unalloced); if (i < num_entries - 1) sbuf_cat(buf, "\n"); } error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return error; } /* ** Caller must init and delete sbuf; this function will clear and ** finish it for caller. ** ** XXX: Cannot use the SEID for this, since there is no longer a ** fixed mapping between SEID and element type. */ static char * ixl_switch_element_string(struct sbuf *s, struct i40e_aqc_switch_config_element_resp *element) { sbuf_clear(s); switch (element->element_type) { case I40E_AQ_SW_ELEM_TYPE_MAC: sbuf_printf(s, "MAC %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_PF: sbuf_printf(s, "PF %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_VF: sbuf_printf(s, "VF %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_EMP: sbuf_cat(s, "EMP"); break; case I40E_AQ_SW_ELEM_TYPE_BMC: sbuf_cat(s, "BMC"); break; case I40E_AQ_SW_ELEM_TYPE_PV: sbuf_cat(s, "PV"); break; case I40E_AQ_SW_ELEM_TYPE_VEB: sbuf_cat(s, "VEB"); break; case I40E_AQ_SW_ELEM_TYPE_PA: sbuf_cat(s, "PA"); break; case I40E_AQ_SW_ELEM_TYPE_VSI: sbuf_printf(s, "VSI %3d", element->element_info); break; default: sbuf_cat(s, "?"); break; } sbuf_finish(s); return sbuf_data(s); } static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; struct sbuf *nmbuf; int error = 0; u16 next = 0; u8 aq_buf[I40E_AQ_LARGE_BUF]; struct i40e_aqc_get_switch_config_resp *sw_config; sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } error = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (error) { device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n", __func__, error, hw->aq.asq_last_status); sbuf_delete(buf); return error; } if (next) device_printf(dev, "%s: TODO: get more config with SEID %d\n", __func__, next); nmbuf = sbuf_new_auto(); if (!nmbuf) { device_printf(dev, "Could not allocate sbuf for name output.\n"); sbuf_delete(buf); 
return (ENOMEM); } sbuf_cat(buf, "\n"); // Assuming <= 255 elements in switch sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); /* Exclude: ** Revision -- all elements are revision 1 for now */ sbuf_printf(buf, "SEID ( Name ) | Uplink | Downlink | Conn Type\n" " | | | (uplink)\n"); for (int i = 0; i < sw_config->header.num_reported; i++) { // "%4d (%8s) | %8s %8s %#8x", sbuf_printf(buf, "%4d", sw_config->element[i].seid); sbuf_cat(buf, " "); sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, &sw_config->element[i])); sbuf_cat(buf, " | "); sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type); if (i < sw_config->header.num_reported - 1) sbuf_cat(buf, "\n"); } sbuf_delete(nmbuf); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } #endif /* IXL_DEBUG_SYSCTL */ #ifdef PCI_IOV static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; struct ixl_vsi *vsi; struct i40e_vsi_context vsi_ctx; int i; uint16_t first_queue; enum i40e_status_code code; hw = &pf->hw; vsi = &pf->vsi; vsi_ctx.pf_num = hw->pf_id; vsi_ctx.uplink_seid = pf->veb_seid; vsi_ctx.connection_type = IXL_VSI_DATA_PORT; vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num; vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF; bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID); vsi_ctx.info.switch_id = htole16(0); vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID); vsi_ctx.info.sec_flags = 0; if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF) vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + /* TODO: If a port VLAN is set, then this needs to be changed */ vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_NOTHING; vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES; for (i = 0; i < IXLV_MAX_QUEUES; i++) vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i); for (; i < nitems(vsi_ctx.info.queue_mapping); i++) vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK); vsi_ctx.info.tc_mapping[0] = htole16( (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL); if (code != I40E_SUCCESS) return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); vf->vsi.seid = vsi_ctx.seid; vf->vsi.vsi_num = vsi_ctx.vsi_number; vf->vsi.first_queue = first_queue; vf->vsi.num_queues = IXLV_MAX_QUEUES; code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); if (code != I40E_SUCCESS) return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); if (code != I40E_SUCCESS) { device_printf(pf->dev, "Failed to disable BW limit: %d\n", ixl_adminq_err_to_errno(hw->aq.asq_last_status)); return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); } memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info)); return (0); } static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; int error; hw = 
&pf->hw; error = ixl_vf_alloc_vsi(pf, vf); if (error != 0) return (error); vf->vsi.hw_filters_add = 0; vf->vsi.hw_filters_del = 0; ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY); ixl_reconfigure_filters(&vf->vsi); return (0); } static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val) { uint32_t qtable; int index, shift; /* * Two queues are mapped in a single register, so we have to do some * gymnastics to convert the queue number into a register index and * shift. */ index = qnum / 2; shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT; qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num)); qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift); qtable |= val << shift; wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable); } static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t qtable; int i; hw = &pf->hw; /* * Contiguous mappings aren't actually supported by the hardware, * so we have to use non-contiguous mappings. */ wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num), I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num), I40E_VPLAN_MAPENA_TXRX_ENA_MASK); for (i = 0; i < vf->vsi.num_queues; i++) { qtable = (vf->vsi.first_queue + i) << I40E_VPLAN_QTABLE_QINDEX_SHIFT; wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable); } /* Map queues allocated to VF to its VSI. */ for (i = 0; i < vf->vsi.num_queues; i++) ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i); /* Set rest of VSI queues as unused. */ for (; i < IXL_MAX_VSI_QUEUES; i++) ixl_vf_map_vsi_queue(hw, vf, i, I40E_VSILAN_QTABLE_QINDEX_0_MASK); ixl_flush(hw); } static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi) { struct i40e_hw *hw; hw = &pf->hw; if (vsi->seid == 0) return; i40e_aq_delete_element(hw, vsi->seid, NULL); } static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg) { wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); ixl_flush(hw); } static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg) { wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); ixl_flush(hw); } static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfint_reg, vpint_reg; int i; hw = &pf->hw; ixl_vf_vsi_release(pf, &vf->vsi); /* Index 0 has a special register. */ ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num)); for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num); ixl_vf_disable_queue_intr(hw, vfint_reg); } /* Index 0 has a special register. 
*/ ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num)); for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num); ixl_vf_unregister_intr(hw, vpint_reg); } vf->vsi.num_queues = 0; } static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; int i; uint16_t global_vf_num; uint32_t ciad; hw = &pf->hw; global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS | (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { ciad = rd32(hw, I40E_PF_PCI_CIAD); if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0) return (0); DELAY(1); } return (ETIMEDOUT); } static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfrtrig; hw = &pf->hw; vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); ixl_flush(hw); ixl_reinit_vf(pf, vf); } static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfrstat, vfrtrig; int i, error; hw = &pf->hw; error = ixl_flush_pcie(pf, vf); if (error != 0) device_printf(pf->dev, "Timed out waiting for PCIe activity to stop on VF-%d\n", vf->vf_num); for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { DELAY(10); vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) break; } if (i == IXL_VF_RESET_TIMEOUT) device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED); vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); if (vf->vsi.seid != 0) ixl_disable_rings(&vf->vsi); ixl_vf_release_resources(pf, vf); ixl_vf_setup_vsi(pf, vf); ixl_vf_map_queues(pf, vf); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE); ixl_flush(hw); } static const char * ixl_vc_opcode_str(uint16_t op) { switch (op) { case I40E_VIRTCHNL_OP_VERSION: return ("VERSION"); case I40E_VIRTCHNL_OP_RESET_VF: return ("RESET_VF"); case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: return ("GET_VF_RESOURCES"); case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: return ("CONFIG_TX_QUEUE"); case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: return ("CONFIG_RX_QUEUE"); case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: return ("CONFIG_VSI_QUEUES"); case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: return ("CONFIG_IRQ_MAP"); case I40E_VIRTCHNL_OP_ENABLE_QUEUES: return ("ENABLE_QUEUES"); case I40E_VIRTCHNL_OP_DISABLE_QUEUES: return ("DISABLE_QUEUES"); case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: return ("ADD_ETHER_ADDRESS"); case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: return ("DEL_ETHER_ADDRESS"); case I40E_VIRTCHNL_OP_ADD_VLAN: return ("ADD_VLAN"); case I40E_VIRTCHNL_OP_DEL_VLAN: return ("DEL_VLAN"); case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: return ("CONFIG_PROMISCUOUS_MODE"); case I40E_VIRTCHNL_OP_GET_STATS: return ("GET_STATS"); case I40E_VIRTCHNL_OP_FCOE: return ("FCOE"); case I40E_VIRTCHNL_OP_EVENT: return ("EVENT"); default: return ("UNKNOWN"); } } static int ixl_vc_opcode_level(uint16_t opcode) { switch (opcode) { case I40E_VIRTCHNL_OP_GET_STATS: return (10); default: return (5); } } static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len) { struct i40e_hw *hw; int global_vf_id; hw = &pf->hw; global_vf_id = hw->func_caps.vf_base_id + vf->vf_num; I40E_VC_DEBUG(pf, 
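	    /*
	     * ixl_vc_opcode_level() above rates GET_STATS at level 10 and
	     * every other opcode at 5, so the VF's routine stats polling is
	     * only logged once vc_debug_lvl is raised to 10 or more, while
	     * the remaining virtchnl traffic shows up at level 5.
	     */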
ixl_vc_opcode_level(op), "Sending msg (op=%s[%d], status=%d) to VF-%d\n", ixl_vc_opcode_str(op), op, status, vf->vf_num); i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL); } static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op) { ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0); } static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line) { I40E_VC_DEBUG(pf, 1, "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n", ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line); ixl_send_vf_msg(pf, vf, op, status, NULL, 0); } static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_version_info reply; if (msg_size != sizeof(struct i40e_virtchnl_version_info)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_ERR_PARAM); return; } reply.major = I40E_VIRTCHNL_VERSION_MAJOR; reply.minor = I40E_VIRTCHNL_VERSION_MINOR; ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply, sizeof(reply)); } static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { if (msg_size != 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF, I40E_ERR_PARAM); return; } ixl_reset_vf(pf, vf); /* No response to a reset message. */ } static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_vf_resource reply; if (msg_size != 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, I40E_ERR_PARAM); return; } bzero(&reply, sizeof(reply)); reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; reply.num_vsis = 1; reply.num_queue_pairs = vf->vsi.num_queues; reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf; reply.vsi_res[0].vsi_id = vf->vsi.vsi_num; reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV; reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues; memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN); ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, I40E_SUCCESS, &reply, sizeof(reply)); } static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info) { struct i40e_hw *hw; struct i40e_hmc_obj_txq txq; uint16_t global_queue_num, global_vf_num; enum i40e_status_code status; uint32_t qtx_ctl; hw = &pf->hw; global_queue_num = vf->vsi.first_queue + info->queue_id; global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; bzero(&txq, sizeof(txq)); status = i40e_clear_lan_tx_queue_context(hw, global_queue_num); if (status != I40E_SUCCESS) return (EINVAL); txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS; txq.head_wb_ena = info->headwb_enabled; txq.head_wb_addr = info->dma_headwb_addr; txq.qlen = info->ring_len; txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]); txq.rdylist_act = 0; status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq); if (status != I40E_SUCCESS) return (EINVAL); qtx_ctl = I40E_QTX_CTL_VF_QUEUE | (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) | (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT); wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl); ixl_flush(hw); return (0); } static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info) { struct i40e_hw *hw; struct i40e_hmc_obj_rxq rxq; uint16_t global_queue_num; enum i40e_status_code status; hw = &pf->hw; global_queue_num = vf->vsi.first_queue + info->queue_id; bzero(&rxq, sizeof(rxq)); if 
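	/*
	 * Note on ixl_vf_config_tx_queue() above: the HMC context "base"
	 * field is the TX ring's DMA address scaled down by
	 * IXL_TX_CTX_BASE_UNITS (the driver's base-address granularity,
	 * defined elsewhere), and the QTX_CTL write marks the global queue
	 * as a VF queue owned by this particular PF/VF pair.
	 */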
(info->databuffer_size > IXL_VF_MAX_BUFFER) return (EINVAL); if (info->max_pkt_size > IXL_VF_MAX_FRAME || info->max_pkt_size < ETHER_MIN_LEN) return (EINVAL); if (info->splithdr_enabled) { if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER) return (EINVAL); rxq.hsplit_0 = info->rx_split_pos & (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP); rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; rxq.dtype = 2; } status = i40e_clear_lan_rx_queue_context(hw, global_queue_num); if (status != I40E_SUCCESS) return (EINVAL); rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS; rxq.qlen = info->ring_len; rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; rxq.dsize = 1; rxq.crcstrip = 1; rxq.l2tsel = 1; rxq.rxmax = info->max_pkt_size; rxq.tphrdesc_ena = 1; rxq.tphwdesc_ena = 1; rxq.tphdata_ena = 1; rxq.tphhead_ena = 1; rxq.lrxqthresh = 2; rxq.prefena = 1; status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq); if (status != I40E_SUCCESS) return (EINVAL); return (0); } static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_vsi_queue_config_info *info; struct i40e_virtchnl_queue_pair_info *pair; int i; if (msg_size < sizeof(*info)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } info = msg; if (info->num_queue_pairs == 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (info->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } for (i = 0; i < info->num_queue_pairs; i++) { pair = &info->qpair[i]; if (pair->txq.vsi_id != vf->vsi.vsi_num || pair->rxq.vsi_id != vf->vsi.vsi_num || pair->txq.queue_id != pair->rxq.queue_id || pair->txq.queue_id >= vf->vsi.num_queues) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } } ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES); } static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, enum i40e_queue_type *last_type, uint16_t *last_queue) { uint32_t offset, qctl; uint16_t itr_indx; if (cur_type == I40E_QUEUE_TYPE_RX) { offset = I40E_QINT_RQCTL(cur_queue); itr_indx = vector->rxitr_idx; } else { offset = I40E_QINT_TQCTL(cur_queue); itr_indx = vector->txitr_idx; } qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | I40E_QINT_RQCTL_CAUSE_ENA_MASK | (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT)); wr32(&pf->hw, offset, qctl); *last_type = cur_type; *last_queue = cur_queue; } static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector) { struct i40e_hw *hw; u_int qindex; enum i40e_queue_type type, last_type; uint32_t lnklst_reg; uint16_t rxq_map, txq_map, cur_queue, last_queue; 
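	/*
	 * The loop below builds the interrupt linked list tail-first: each
	 * ixl_vf_set_qctl() call points the current queue's QINT_[RT]QCTL
	 * entry at the queue programmed just before it, and the VPINT
	 * link-list register written afterwards points at the last queue
	 * handled, which therefore becomes the head of the chain.  For
	 * example (queue counts assumed for illustration), with two TX and
	 * two RX queues mapped the hardware walks RX1 -> TX1 -> RX0 -> TX0
	 * and then reaches IXL_END_OF_INTR_LNKLST.
	 */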
hw = &pf->hw; rxq_map = vector->rxq_map; txq_map = vector->txq_map; last_queue = IXL_END_OF_INTR_LNKLST; last_type = I40E_QUEUE_TYPE_RX; /* * The datasheet says to optimize performance, RX queues and TX queues * should be interleaved in the interrupt linked list, so we process * both at once here. */ while ((rxq_map != 0) || (txq_map != 0)) { if (txq_map != 0) { qindex = ffs(txq_map) - 1; type = I40E_QUEUE_TYPE_TX; cur_queue = vf->vsi.first_queue + qindex; ixl_vf_set_qctl(pf, vector, type, cur_queue, &last_type, &last_queue); txq_map &= ~(1 << qindex); } if (rxq_map != 0) { qindex = ffs(rxq_map) - 1; type = I40E_QUEUE_TYPE_RX; cur_queue = vf->vsi.first_queue + qindex; ixl_vf_set_qctl(pf, vector, type, cur_queue, &last_type, &last_queue); rxq_map &= ~(1 << qindex); } } if (vector->vector_id == 0) lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num); else lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id, vf->vf_num); wr32(hw, lnklst_reg, (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) | (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); ixl_flush(hw); } static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_irq_map_info *map; struct i40e_virtchnl_vector_map *vector; struct i40e_hw *hw; int i, largest_txq, largest_rxq; hw = &pf->hw; if (msg_size < sizeof(*map)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } map = msg; if (map->num_vectors == 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } for (i = 0; i < map->num_vectors; i++) { vector = &map->vecmap[i]; if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) || vector->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } if (vector->rxq_map != 0) { largest_rxq = fls(vector->rxq_map) - 1; if (largest_rxq >= vf->vsi.num_queues) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } } if (vector->txq_map != 0) { largest_txq = fls(vector->txq_map) - 1; if (largest_txq >= vf->vsi.num_queues) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } } if (vector->rxitr_idx > IXL_MAX_ITR_IDX || vector->txitr_idx > IXL_MAX_ITR_IDX) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } ixl_vf_config_vector(pf, vf, vector); } ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP); } static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_queue_select *select; int error; if (msg_size != sizeof(*select)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_PARAM); return; } select = msg; if (select->vsi_id != vf->vsi.vsi_num || select->rx_queues == 0 || select->tx_queues == 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_PARAM); return; } error = ixl_enable_rings(&vf->vsi); if (error) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_TIMEOUT); return; } ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES); } static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_queue_select *select; int error; if (msg_size != sizeof(*select)) { i40e_send_vf_nack(pf, vf, 
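	    /*
	     * Same validation pattern as the handlers above: fixed-size
	     * virtchnl messages are rejected unless their length is exactly
	     * sizeof() the expected structure, while variable-length ones
	     * (VSI queue config, IRQ map, address and VLAN lists) are
	     * checked against sizeof(header) + num_elements * sizeof(element).
	     */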
I40E_VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_PARAM); return; } select = msg; if (select->vsi_id != vf->vsi.vsi_num || select->rx_queues == 0 || select->tx_queues == 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_PARAM); return; } error = ixl_disable_rings(&vf->vsi); if (error) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_TIMEOUT); return; } ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES); } static boolean_t ixl_zero_mac(const uint8_t *addr) { uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; return (cmp_etheraddr(addr, zero)); } static boolean_t ixl_bcast_mac(const uint8_t *addr) { return (cmp_etheraddr(addr, ixl_bcast_addr)); } static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr) { if (ixl_zero_mac(addr) || ixl_bcast_mac(addr)) return (EINVAL); /* * If the VF is not allowed to change its MAC address, don't let it * set a MAC filter for an address that is not a multicast address and * is not its assigned MAC. */ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) && !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac))) return (EPERM); return (0); } static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_ether_addr_list *addr_list; struct i40e_virtchnl_ether_addr *addr; struct ixl_vsi *vsi; int i; size_t expected_size; vsi = &vf->vsi; if (msg_size < sizeof(*addr_list)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); return; } addr_list = msg; expected_size = sizeof(*addr_list) + addr_list->num_elements * sizeof(*addr); if (addr_list->num_elements == 0 || addr_list->vsi_id != vsi->vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); return; } for (i = 0; i < addr_list->num_elements; i++) { if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); return; } } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY); } ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS); } static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_ether_addr_list *addr_list; struct i40e_virtchnl_ether_addr *addr; size_t expected_size; int i; if (msg_size < sizeof(*addr_list)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); return; } addr_list = msg; expected_size = sizeof(*addr_list) + addr_list->num_elements * sizeof(*addr); if (addr_list->num_elements == 0 || addr_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); return; } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); return; } } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY); } ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS); } static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_vsi_context vsi_ctx; vsi_ctx.seid = vf->vsi.seid; bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi_ctx.info.port_vlan_flags = 
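	    /*
	     * Only the VLAN section is flagged valid in this context, so the
	     * i40e_aq_update_vsi_params() call below leaves the rest of the
	     * VF's VSI configuration untouched.
	     */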
I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL)); } static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_vlan_filter_list *filter_list; enum i40e_status_code code; size_t expected_size; int i; if (msg_size < sizeof(*filter_list)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } filter_list = msg; expected_size = sizeof(*filter_list) + filter_list->num_elements * sizeof(uint16_t); if (filter_list->num_elements == 0 || filter_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) { if (filter_list->vlan_id[i] > EVL_VLID_MASK) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } } code = ixl_vf_enable_vlan_strip(pf, vf); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); } for (i = 0; i < filter_list->num_elements; i++) ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN); } static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_vlan_filter_list *filter_list; int i; size_t expected_size; if (msg_size < sizeof(*filter_list)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } filter_list = msg; expected_size = sizeof(*filter_list) + filter_list->num_elements * sizeof(uint16_t); if (filter_list->num_elements == 0 || filter_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) { if (filter_list->vlan_id[i] > EVL_VLID_MASK) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } } if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN); } static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_promisc_info *info; enum i40e_status_code code; if (msg_size != sizeof(*info)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } info = msg; if (info->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id, info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); return; } code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id, info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); return; } ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE); } static void 
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_virtchnl_queue_select *queue; if (msg_size != sizeof(*queue)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, I40E_ERR_PARAM); return; } queue = msg; if (queue->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, I40E_ERR_PARAM); return; } ixl_update_eth_stats(&vf->vsi); ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats)); } static void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event) { struct ixl_vf *vf; void *msg; uint16_t vf_num, msg_size; uint32_t opcode; vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id; opcode = le32toh(event->desc.cookie_high); if (vf_num >= pf->num_vfs) { device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num); return; } vf = &pf->vfs[vf_num]; msg = event->msg_buf; msg_size = event->msg_len; I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode), "Got msg %s(%d) from VF-%d of size %d\n", ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size); switch (opcode) { case I40E_VIRTCHNL_OP_VERSION: ixl_vf_version_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_RESET_VF: ixl_vf_reset_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: ixl_vf_get_resources_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: ixl_vf_config_vsi_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: ixl_vf_config_irq_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_ENABLE_QUEUES: ixl_vf_enable_queues_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_DISABLE_QUEUES: ixl_vf_disable_queues_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: ixl_vf_add_mac_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: ixl_vf_del_mac_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_ADD_VLAN: ixl_vf_add_vlan_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_DEL_VLAN: ixl_vf_del_vlan_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ixl_vf_config_promisc_msg(pf, vf, msg, msg_size); break; case I40E_VIRTCHNL_OP_GET_STATS: ixl_vf_get_stats_msg(pf, vf, msg, msg_size); break; /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: default: i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED); break; } } /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). 
*/ static void ixl_handle_vflr(void *arg, int pending) { struct ixl_pf *pf; struct i40e_hw *hw; uint16_t global_vf_num; uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0; int i; pf = arg; hw = &pf->hw; IXL_PF_LOCK(pf); for (i = 0; i < pf->num_vfs; i++) { global_vf_num = hw->func_caps.vf_base_id + i; vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num); vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num); vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index)); if (vflrstat & vflrstat_mask) { wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index), vflrstat_mask); ixl_reinit_vf(pf, &pf->vfs[i]); } } icr0 = rd32(hw, I40E_PFINT_ICR0_ENA); icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, icr0); ixl_flush(hw); IXL_PF_UNLOCK(pf); } static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) { switch (err) { case I40E_AQ_RC_EPERM: return (EPERM); case I40E_AQ_RC_ENOENT: return (ENOENT); case I40E_AQ_RC_ESRCH: return (ESRCH); case I40E_AQ_RC_EINTR: return (EINTR); case I40E_AQ_RC_EIO: return (EIO); case I40E_AQ_RC_ENXIO: return (ENXIO); case I40E_AQ_RC_E2BIG: return (E2BIG); case I40E_AQ_RC_EAGAIN: return (EAGAIN); case I40E_AQ_RC_ENOMEM: return (ENOMEM); case I40E_AQ_RC_EACCES: return (EACCES); case I40E_AQ_RC_EFAULT: return (EFAULT); case I40E_AQ_RC_EBUSY: return (EBUSY); case I40E_AQ_RC_EEXIST: return (EEXIST); case I40E_AQ_RC_EINVAL: return (EINVAL); case I40E_AQ_RC_ENOTTY: return (ENOTTY); case I40E_AQ_RC_ENOSPC: return (ENOSPC); case I40E_AQ_RC_ENOSYS: return (ENOSYS); case I40E_AQ_RC_ERANGE: return (ERANGE); case I40E_AQ_RC_EFLUSHED: return (EINVAL); /* No exact equivalent in errno.h */ case I40E_AQ_RC_BAD_ADDR: return (EFAULT); case I40E_AQ_RC_EMODE: return (EPERM); case I40E_AQ_RC_EFBIG: return (EFBIG); default: return (EINVAL); } } static int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *pf_vsi; enum i40e_status_code ret; int i, error; pf = device_get_softc(dev); hw = &pf->hw; pf_vsi = &pf->vsi; IXL_PF_LOCK(pf); pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT | M_ZERO); if (pf->vfs == NULL) { error = ENOMEM; goto fail; } for (i = 0; i < num_vfs; i++) sysctl_ctx_init(&pf->vfs[i].ctx); ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid, 1, FALSE, FALSE, &pf->veb_seid, NULL); if (ret != I40E_SUCCESS) { error = ixl_adminq_err_to_errno(hw->aq.asq_last_status); device_printf(dev, "add_veb failed; code=%d error=%d", ret, error); goto fail; } ixl_configure_msix(pf); ixl_enable_adminq(hw); pf->num_vfs = num_vfs; IXL_PF_UNLOCK(pf); return (0); fail: free(pf->vfs, M_IXL); pf->vfs = NULL; IXL_PF_UNLOCK(pf); return (error); } static void ixl_iov_uninit(device_t dev) { struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; struct ifnet *ifp; struct ixl_vf *vfs; int i, num_vfs; pf = device_get_softc(dev); hw = &pf->hw; vsi = &pf->vsi; ifp = vsi->ifp; IXL_PF_LOCK(pf); for (i = 0; i < pf->num_vfs; i++) { if (pf->vfs[i].vsi.seid != 0) i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL); } if (pf->veb_seid != 0) { i40e_aq_delete_element(hw, pf->veb_seid, NULL); pf->veb_seid = 0; } if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ixl_disable_intr(vsi); vfs = pf->vfs; num_vfs = pf->num_vfs; pf->vfs = NULL; pf->num_vfs = 0; IXL_PF_UNLOCK(pf); /* Do this after the unlock as sysctl_ctx_free might sleep. 
*/ for (i = 0; i < num_vfs; i++) sysctl_ctx_free(&vfs[i].ctx); free(vfs, M_IXL); } static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { char sysctl_name[QUEUE_NAME_LEN]; struct ixl_pf *pf; struct ixl_vf *vf; const void *mac; size_t size; int error; pf = device_get_softc(dev); vf = &pf->vfs[vfnum]; IXL_PF_LOCK(pf); vf->vf_num = vfnum; vf->vsi.back = pf; vf->vf_flags = VF_FLAG_ENABLED; SLIST_INIT(&vf->vsi.ftl); error = ixl_vf_setup_vsi(pf, vf); if (error != 0) goto out; if (nvlist_exists_binary(params, "mac-addr")) { mac = nvlist_get_binary(params, "mac-addr", &size); bcopy(mac, vf->mac, ETHER_ADDR_LEN); if (nvlist_get_bool(params, "allow-set-mac")) vf->vf_flags |= VF_FLAG_SET_MAC_CAP; } else /* * If the administrator has not specified a MAC address then * we must allow the VF to choose one. */ vf->vf_flags |= VF_FLAG_SET_MAC_CAP; if (nvlist_get_bool(params, "mac-anti-spoof")) vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF; if (nvlist_get_bool(params, "allow-promisc")) vf->vf_flags |= VF_FLAG_PROMISC_CAP; vf->vf_flags |= VF_FLAG_VLAN_CAP; ixl_reset_vf(pf, vf); out: IXL_PF_UNLOCK(pf); if (error == 0) { snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name); } return (error); } #endif /* PCI_IOV */ Index: head/sys/dev/ixl/ixl_pf.h =================================================================== --- head/sys/dev/ixl/ixl_pf.h (revision 299550) +++ head/sys/dev/ixl/ixl_pf.h (revision 299551) @@ -1,152 +1,165 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_PF_H_ #define _IXL_PF_H_ #define VF_FLAG_ENABLED 0x01 #define VF_FLAG_SET_MAC_CAP 0x02 #define VF_FLAG_VLAN_CAP 0x04 #define VF_FLAG_PROMISC_CAP 0x08 #define VF_FLAG_MAC_ANTI_SPOOF 0x10 #define IXL_PF_STATE_EMPR_RESETTING (1 << 0) struct ixl_vf { struct ixl_vsi vsi; uint32_t vf_flags; uint8_t mac[ETHER_ADDR_LEN]; uint16_t vf_num; struct sysctl_ctx_list ctx; }; /* Physical controller structure */ struct ixl_pf { struct i40e_hw hw; struct i40e_osdep osdep; struct device *dev; struct resource *pci_mem; struct resource *msix_mem; /* * Interrupt resources: this set is * either used for legacy, or for Link * when doing MSIX */ void *tag; struct resource *res; struct callout timer; int msix; int if_flags; int state; struct mtx pf_mtx; u32 qbase; u32 admvec; struct task adminq; struct taskqueue *tq; bool link_up; u32 link_speed; int advertised_speed; int fc; /* local flow ctrl setting */ /* ** Network interfaces ** These are the traffic class holders, and ** will have a stack interface and queues ** associated with them. ** NOTE: The PF has only a single interface, ** so it is embedded in the PF struct. */ struct ixl_vsi vsi; /* Misc stats maintained by the driver */ u64 watchdog_events; u64 admin_irq; /* Statistics from hw */ struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats_offsets; bool stat_offsets_loaded; struct ixl_vf *vfs; int num_vfs; uint16_t veb_seid; struct task vflr_task; int vc_debug_lvl; }; /* * Defines used for NVM update ioctls. * This value is used in the Solaris tool, too. */ #define I40E_NVM_ACCESS \ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5) #define IXL_DEFAULT_PHY_INT_MASK \ - (I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL) + ((~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL \ + | I40E_AQ_EVENT_MEDIA_NA)) & 0x3FF) -#define IXL_SET_ADVERTISE_HELP \ -"Control link advertise speed:\n" \ -"\tFlags:\n" \ -"\t\t0x1 - advertise 100 Mb\n" \ -"\t\t0x2 - advertise 1G\n" \ -"\t\t0x4 - advertise 10G\n" \ -"\t\t0x8 - advertise 20G\n\n" \ -"\tDoes not work on 40G devices." +/* Sysctl help messages; displayed with "sysctl -d" */ +#define IXL_SYSCTL_HELP_SET_ADVERTISE \ +"\nControl advertised link speed.\n" \ +"Flags:\n" \ +"\t0x1 - advertise 100M\n" \ +"\t0x2 - advertise 1G\n" \ +"\t0x4 - advertise 10G\n" \ +"\t0x8 - advertise 20G\n\n" \ +"Operation not supported on 40G devices." + +#define IXL_SYSCTL_HELP_FC \ +"\nSet flow control mode using the values below.\n" \ +"\t0 - off\n" \ +"\t1 - rx pause\n" \ +"\t2 - tx pause\n" \ +"\t3 - tx and rx pause" + +#define IXL_SYSCTL_HELP_LINK_STATUS \ +"\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \ +" the response." \ #define I40E_VC_DEBUG(pf, level, ...) \ do { \ if ((pf)->vc_debug_lvl >= (level)) \ device_printf((pf)->dev, __VA_ARGS__); \ } while (0) #define i40e_send_vf_nack(pf, vf, op, st) \ ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__) #define IXL_PF_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF) #define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx) #define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx) #define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx) #define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED) #endif /* _IXL_PF_H_ */
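A minimal sketch of how the updated PHY interrupt mask would typically be handed to the firmware, assuming the shared-code helper i40e_aq_set_phy_int_mask() takes (hw, u16 mask, cmd_details) and that a set bit suppresses reporting of that event; neither assumption is stated in this diff, and pf is a hypothetical softc pointer:

	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/*
	 * IXL_DEFAULT_PHY_INT_MASK is now the complement of the three
	 * link-relevant events, clipped to the low ten bits (0x3FF), so
	 * everything except LINK_UPDOWN, MODULE_QUAL_FAIL and MEDIA_NA
	 * is masked off and only those events raise link interrupts.
	 */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL);
	if (status != I40E_SUCCESS)
		device_printf(pf->dev, "set_phy_int_mask failed: %d\n", status);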