Index: head/sys/dev/ixl/i40e_adminq_cmd.h =================================================================== --- head/sys/dev/ixl/i40e_adminq_cmd.h (revision 365230) +++ head/sys/dev/ixl/i40e_adminq_cmd.h (revision 365231) @@ -1,2959 +1,2988 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_ /* This header file defines the i40e Admin Queue commands and is shared between * i40e Firmware and Software. * * This file needs to comply with the Linux Kernel coding style. */ #define I40E_FW_API_VERSION_MAJOR 0x0001 #define I40E_FW_API_VERSION_MINOR_X722 0x000A #define I40E_FW_API_VERSION_MINOR_X710 0x000A #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\
		I40E_FW_API_VERSION_MINOR_X710 : \
		I40E_FW_API_VERSION_MINOR_X722)

/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
/* API version 1.10 for X722 devices adds ability to request FEC encoding */
#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A

struct i40e_aq_desc {
	__le16 flags;
	__le16 opcode;
	__le16 datalen;
	__le16 retval;
	__le32 cookie_high;
	__le32 cookie_low;
	union {
		struct {
			__le32 param0;
			__le32 param1;
			__le32 param2;
			__le32 param3;
		} internal;
		struct {
			__le32 param0;
			__le32 param1;
			__le32 addr_high;
			__le32 addr_low;
		} external;
		u8 raw[16];
	} params;
};

/* Flags sub-structure
 * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
 * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
 */

/* command flags and offsets*/
#define I40E_AQ_FLAG_DD_SHIFT	0
#define I40E_AQ_FLAG_CMP_SHIFT	1
#define I40E_AQ_FLAG_ERR_SHIFT	2
#define I40E_AQ_FLAG_VFE_SHIFT	3
#define I40E_AQ_FLAG_LB_SHIFT	9
#define I40E_AQ_FLAG_RD_SHIFT	10
#define I40E_AQ_FLAG_VFC_SHIFT	11
#define I40E_AQ_FLAG_BUF_SHIFT	12
#define I40E_AQ_FLAG_SI_SHIFT	13
#define I40E_AQ_FLAG_EI_SHIFT	14
#define I40E_AQ_FLAG_FE_SHIFT	15

#define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1 */
#define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
#define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
#define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
#define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200 */
#define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400 */
#define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
#define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
#define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
#define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
#define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */

/* error codes */
enum i40e_admin_queue_err {
	I40E_AQ_RC_OK		= 0,  /* success */
	I40E_AQ_RC_EPERM	= 1,  /* Operation not permitted */
	I40E_AQ_RC_ENOENT	= 2,  /* No such element */
	I40E_AQ_RC_ESRCH	= 3,  /* Bad opcode */
	I40E_AQ_RC_EINTR	= 4,  /* operation interrupted */
	I40E_AQ_RC_EIO		= 5,  /* I/O error */
	I40E_AQ_RC_ENXIO	= 6,  /* No such resource */
	I40E_AQ_RC_E2BIG	= 7,  /* Arg too long */
	I40E_AQ_RC_EAGAIN	= 8,  /* Try again */
	I40E_AQ_RC_ENOMEM	= 9,  /* Out of memory */
	I40E_AQ_RC_EACCES	= 10, /* Permission denied */
	I40E_AQ_RC_EFAULT	= 11, /* Bad address */
	I40E_AQ_RC_EBUSY	= 12, /* Device or resource busy */
	I40E_AQ_RC_EEXIST	= 13, /* object already exists */
	I40E_AQ_RC_EINVAL	= 14, /* Invalid argument */
	I40E_AQ_RC_ENOTTY	= 15, /* Not a typewriter */
	I40E_AQ_RC_ENOSPC	= 16, /* No space left or alloc failure */
	I40E_AQ_RC_ENOSYS	= 17, /* Function not implemented */
	I40E_AQ_RC_ERANGE	= 18, /* Parameter out of range */
	I40E_AQ_RC_EFLUSHED	= 19, /* Cmd flushed due to prev cmd error */
	I40E_AQ_RC_BAD_ADDR	= 20, /* Descriptor contains a bad pointer */
	I40E_AQ_RC_EMODE	= 21, /* Op not allowed in current dev mode */
	I40E_AQ_RC_EFBIG	= 22, /* File too large */
};

/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
	/* aq commands */
	i40e_aqc_opc_get_version	= 0x0001,
	i40e_aqc_opc_driver_version	= 0x0002,
	i40e_aqc_opc_queue_shutdown	= 0x0003,
	i40e_aqc_opc_set_pf_context	=
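
/* Usage sketch (editorial illustration, not part of the on-wire API): a
 * direct command carries everything in the 16-byte params area, so a
 * caller only zeroes the descriptor, sets the opcode, and waits for
 * firmware to set DD (plus ERR and a retval from i40e_admin_queue_err on
 * failure). The helper names below are the ones the companion
 * i40e_common.c/i40e_adminq.c sources provide; treat this as a sketch.
 */
#if 0	/* illustrative only */
	struct i40e_aq_desc desc;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
	/* indirect commands additionally set I40E_AQ_FLAG_BUF (and
	 * I40E_AQ_FLAG_RD when the buffer is sent to firmware), with the
	 * buffer address in params.external.addr_high/addr_low
	 */
#endif
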
0x0004, /* resource ownership */ i40e_aqc_opc_request_resource = 0x0008, i40e_aqc_opc_release_resource = 0x0009, i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, /* Proxy commands */ i40e_aqc_opc_set_proxy_config = 0x0104, i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, /* LAA */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, /* PXE */ i40e_aqc_opc_clear_pxe_mode = 0x0110, /* WoL commands */ i40e_aqc_opc_set_wol_filter = 0x0120, i40e_aqc_opc_get_wake_reason = 0x0121, i40e_aqc_opc_clear_all_wol_filters = 0x025E, /* internal switch commands */ i40e_aqc_opc_get_switch_config = 0x0200, i40e_aqc_opc_add_statistics = 0x0201, i40e_aqc_opc_remove_statistics = 0x0202, i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, i40e_aqc_opc_set_switch_config = 0x0205, i40e_aqc_opc_rx_ctl_reg_read = 0x0206, i40e_aqc_opc_rx_ctl_reg_write = 0x0207, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, i40e_aqc_opc_get_vsi_parameters = 0x0212, i40e_aqc_opc_add_pv = 0x0220, i40e_aqc_opc_update_pv_parameters = 0x0221, i40e_aqc_opc_get_pv_parameters = 0x0222, i40e_aqc_opc_add_veb = 0x0230, i40e_aqc_opc_update_veb_parameters = 0x0231, i40e_aqc_opc_get_veb_parameters = 0x0232, i40e_aqc_opc_delete_element = 0x0243, i40e_aqc_opc_add_macvlan = 0x0250, i40e_aqc_opc_remove_macvlan = 0x0251, i40e_aqc_opc_add_vlan = 0x0252, i40e_aqc_opc_remove_vlan = 0x0253, i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, i40e_aqc_opc_add_tag = 0x0255, i40e_aqc_opc_remove_tag = 0x0256, i40e_aqc_opc_add_multicast_etag = 0x0257, i40e_aqc_opc_remove_multicast_etag = 0x0258, i40e_aqc_opc_update_tag = 0x0259, i40e_aqc_opc_add_control_packet_filter = 0x025A, i40e_aqc_opc_remove_control_packet_filter = 0x025B, i40e_aqc_opc_add_cloud_filters = 0x025C, i40e_aqc_opc_remove_cloud_filters = 0x025D, i40e_aqc_opc_clear_wol_switch_filters = 0x025E, i40e_aqc_opc_replace_cloud_filters = 0x025F, i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 0x0261, /* DCB commands */ i40e_aqc_opc_dcb_ignore_pfc = 0x0301, i40e_aqc_opc_dcb_updated = 0x0302, i40e_aqc_opc_set_dcb_parameters = 0x0303, /* TX scheduler */ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, i40e_aqc_opc_query_vsi_bw_config = 0x0408, i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, i40e_aqc_opc_enable_switching_comp_ets = 0x0413, i40e_aqc_opc_modify_switching_comp_ets = 0x0414, i40e_aqc_opc_disable_switching_comp_ets = 0x0415, i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, i40e_aqc_opc_query_port_ets_config = 0x0419, i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, i40e_aqc_opc_suspend_port_tx = 0x041B, i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, /* hmc */ i40e_aqc_opc_query_hmc_resource_profile = 0x0500, i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, i40e_aqc_opc_set_phy_config = 0x0601, i40e_aqc_opc_set_mac_config = 0x0603, i40e_aqc_opc_set_link_restart_an = 0x0605, i40e_aqc_opc_get_link_status = 0x0607, i40e_aqc_opc_set_phy_int_mask = 0x0613, i40e_aqc_opc_get_local_advt_reg = 0x0614, i40e_aqc_opc_set_local_advt_reg = 0x0615, i40e_aqc_opc_get_partner_advt = 
0x0616, i40e_aqc_opc_set_lb_modes = 0x0618, i40e_aqc_opc_get_phy_wol_caps = 0x0621, i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, i40e_aqc_opc_run_phy_activity = 0x0626, i40e_aqc_opc_set_phy_register = 0x0628, i40e_aqc_opc_get_phy_register = 0x0629, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, i40e_aqc_opc_nvm_erase = 0x0702, i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, i40e_aqc_opc_nvm_progress = 0x0706, i40e_aqc_opc_oem_post_update = 0x0720, i40e_aqc_opc_thermal_sensor = 0x0721, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, i40e_aqc_opc_send_msg_to_vf = 0x0802, i40e_aqc_opc_send_msg_to_peer = 0x0803, /* alternate structure */ i40e_aqc_opc_alternate_write = 0x0900, i40e_aqc_opc_alternate_write_indirect = 0x0901, i40e_aqc_opc_alternate_read = 0x0902, i40e_aqc_opc_alternate_read_indirect = 0x0903, i40e_aqc_opc_alternate_write_done = 0x0904, i40e_aqc_opc_alternate_set_mode = 0x0905, i40e_aqc_opc_alternate_clear_port = 0x0906, /* LLDP commands */ i40e_aqc_opc_lldp_get_mib = 0x0A00, i40e_aqc_opc_lldp_update_mib = 0x0A01, i40e_aqc_opc_lldp_add_tlv = 0x0A02, i40e_aqc_opc_lldp_update_tlv = 0x0A03, i40e_aqc_opc_lldp_delete_tlv = 0x0A04, i40e_aqc_opc_lldp_stop = 0x0A05, i40e_aqc_opc_lldp_start = 0x0A06, i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, i40e_aqc_opc_lldp_set_local_mib = 0x0A08, i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09, i40e_aqc_opc_lldp_restore = 0x0A0A, /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, i40e_aqc_opc_set_rss_key = 0x0B02, i40e_aqc_opc_set_rss_lut = 0x0B03, i40e_aqc_opc_get_rss_key = 0x0B04, i40e_aqc_opc_get_rss_lut = 0x0B05, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, /* OEM commands */ i40e_aqc_opc_oem_parameter_change = 0xFE00, i40e_aqc_opc_oem_device_status_change = 0xFE01, i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, i40e_aqc_opc_debug_dump_internals = 0xFF08, }; /* command structures and indirect data structures */ /* Structure naming conventions: * - no suffix for direct command descriptor structures * - _data for indirect sent data * - _resp for indirect return data (data which is both will use _data) * - _completion for direct return data * - _element_ for repeated elements (may also be _data or _resp) * * Command structures are expected to overlay the params.raw member of the basic * descriptor, and as such cannot exceed 16 bytes in length. */ /* This macro is used to generate a compilation error if a structure * is not exactly the correct length. It gives a divide by zero error if the * structure is not of the correct size, otherwise it creates an enum that is * never used. */ #define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } /* This macro is used extensively to ensure that command structures are 16 * bytes in length as they have to map to the raw array of that size. 
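 *
 * For instance (illustrative expansion only), the check placed after
 * struct i40e_aqc_get_version below expands to
 *
 *	enum i40e_static_assert_enum_i40e_aqc_get_version {
 *		i40e_static_assert_i40e_aqc_get_version =
 *		(16)/((sizeof(struct i40e_aqc_get_version) == (16)) ? 1 : 0)
 *	};
 *
 * so a wrong size divides by zero at compile time, much as a C11
 * _Static_assert(sizeof(struct i40e_aqc_get_version) == 16, "") would.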
*/ #define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) /* internal (0x00XX) commands */ /* Get version (direct 0x0001) */ struct i40e_aqc_get_version { __le32 rom_ver; __le32 fw_build; __le16 fw_major; __le16 fw_minor; __le16 api_major; __le16 api_minor; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); /* Send driver version (indirect 0x0002) */ struct i40e_aqc_driver_version { u8 driver_major_ver; u8 driver_minor_ver; u8 driver_build_ver; u8 driver_subbuild_ver; u8 reserved[4]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); /* Queue Shutdown (direct 0x0003) */ struct i40e_aqc_queue_shutdown { __le32 driver_unloading; #define I40E_AQ_DRIVER_UNLOADING 0x1 u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); /* Set PF context (0x0004, direct) */ struct i40e_aqc_set_pf_context { u8 pf_id; u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ #define I40E_AQ_RESOURCE_NVM 1 #define I40E_AQ_RESOURCE_SDP 2 #define I40E_AQ_RESOURCE_ACCESS_READ 1 #define I40E_AQ_RESOURCE_ACCESS_WRITE 2 #define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 #define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 struct i40e_aqc_request_resource { __le16 resource_id; __le16 access_type; __le32 timeout; __le32 resource_number; u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); /* Get function capabilities (indirect 0x000A) * Get device capabilities (indirect 0x000B) */ struct i40e_aqc_list_capabilites { u8 command_flags; #define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 reserved[2]; __le32 count; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); struct i40e_aqc_list_capabilities_element_resp { __le16 id; u8 major_rev; u8 minor_rev; __le32 number; __le32 logical_id; __le32 phys_id; u8 reserved[16]; }; /* list of caps */ #define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 #define I40E_AQ_CAP_ID_MNG_MODE 0x0002 #define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 #define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 #define I40E_AQ_CAP_ID_8021QBG 0x0015 #define I40E_AQ_CAP_ID_8021QBR 0x0016 #define I40E_AQ_CAP_ID_VSI 0x0017 #define I40E_AQ_CAP_ID_DCB 0x0018 #define I40E_AQ_CAP_ID_FCOE 0x0021 #define I40E_AQ_CAP_ID_ISCSI 0x0022 #define I40E_AQ_CAP_ID_RSS 0x0040 #define I40E_AQ_CAP_ID_RXQ 0x0041 #define I40E_AQ_CAP_ID_TXQ 0x0042 #define I40E_AQ_CAP_ID_MSIX 0x0043 #define I40E_AQ_CAP_ID_VF_MSIX 0x0044 #define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 #define I40E_AQ_CAP_ID_1588 0x0046 #define I40E_AQ_CAP_ID_IWARP 0x0051 #define I40E_AQ_CAP_ID_LED 0x0061 #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 #define I40E_AQ_CAP_ID_WSR_PROT 0x0064 #define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { __le16 command_flags; #define I40E_AQ_CPPM_EN_LTRC 0x0800 #define I40E_AQ_CPPM_EN_DMCTH 0x1000 #define I40E_AQ_CPPM_EN_DMCTLX 0x2000 #define I40E_AQ_CPPM_EN_HPTC 0x4000 #define I40E_AQ_CPPM_EN_DMARC 0x8000 __le16 ttlx; __le32 dmacr; __le16 dmcth; u8 hptc; u8 reserved; __le32 pfltrc; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / 
response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; #define I40E_AQ_ARP_INIT_IPV4 0x0800 #define I40E_AQ_ARP_UNSUP_CTL 0x1000 #define I40E_AQ_ARP_ENA 0x2000 #define I40E_AQ_ARP_ADD_IPV4 0x4000 #define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; __le32 enabled_offloads; #define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 #define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; }; I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); /* Set NS Proxy Table Entry Command (indirect 0x0105) */ struct i40e_aqc_ns_proxy_data { __le16 table_idx_mac_addr_0; __le16 table_idx_mac_addr_1; __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; #define I40E_AQ_NS_PROXY_ADD_0 0x0001 #define I40E_AQ_NS_PROXY_DEL_0 0x0002 #define I40E_AQ_NS_PROXY_ADD_1 0x0004 #define I40E_AQ_NS_PROXY_DEL_1 0x0008 #define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 #define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 #define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 #define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 #define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 #define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 #define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 #define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 #define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ u8 ipv6_addr_1[16]; }; I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; #define I40E_AQ_LAA_FLAG_WR 0x8000 u8 reserved[2]; __le32 sal; __le16 sah; u8 reserved2[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); /* Manage MAC Address Read Command (indirect 0x0107) */ struct i40e_aqc_mac_address_read { __le16 command_flags; #define I40E_AQC_LAN_ADDR_VALID 0x10 #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 #define I40E_AQC_MC_MAG_EN_VALID 0x100 #define I40E_AQC_WOL_PRESERVE_STATUS 0x200 #define I40E_AQC_ADDR_VALID_MASK 0x3F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); struct i40e_aqc_mac_address_read_data { u8 pf_lan_mac[6]; u8 pf_san_mac[6]; u8 port_mac[6]; u8 pf_wol_mac[6]; }; I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { __le16 command_flags; #define I40E_AQC_MC_MAG_EN 0x0100 #define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200 #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 #define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 #define I40E_AQC_WRITE_TYPE_MASK 0xC000 __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); /* PXE commands (0x011x) */ /* Clear PXE Command and response (direct 0x0110) */ struct i40e_aqc_clear_pxe { u8 rx_cnt; u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); /* Set WoL Filter (0x0120) */ struct i40e_aqc_set_wol_filter { __le16 filter_index; #define I40E_AQC_MAX_NUM_WOL_FILTERS 8 #define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 #define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) #define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 #define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) __le16 cmd_flags; #define I40E_AQC_SET_WOL_FILTER 0x8000 #define 
I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 #define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 #define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 #define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 __le16 valid_flags; #define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 #define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 u8 reserved[2]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); struct i40e_aqc_set_wol_filter_data { u8 filter[128]; u8 mask[16]; }; I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); /* Get Wake Reason (0x0121) */ struct i40e_aqc_get_wake_reason_completion { u8 reserved_1[2]; __le16 wake_reason; #define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 #define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) #define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 #define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) u8 reserved_2[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); /* Switch configuration commands (0x02xx) */ /* Used by many indirect commands that only pass an seid and a buffer in the * command */ struct i40e_aqc_switch_seid { __le16 seid; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); /* Get Switch Configuration command (indirect 0x0200) * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_switch_config_header_resp { __le16 num_reported; __le16 num_total; u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); struct i40e_aqc_switch_config_element_resp { u8 element_type; #define I40E_AQ_SW_ELEM_TYPE_MAC 1 #define I40E_AQ_SW_ELEM_TYPE_PF 2 #define I40E_AQ_SW_ELEM_TYPE_VF 3 #define I40E_AQ_SW_ELEM_TYPE_EMP 4 #define I40E_AQ_SW_ELEM_TYPE_BMC 5 #define I40E_AQ_SW_ELEM_TYPE_PV 16 #define I40E_AQ_SW_ELEM_TYPE_VEB 17 #define I40E_AQ_SW_ELEM_TYPE_PA 18 #define I40E_AQ_SW_ELEM_TYPE_VSI 19 u8 revision; #define I40E_AQ_SW_ELEM_REV_1 1 __le16 seid; __le16 uplink_seid; __le16 downlink_seid; u8 reserved[3]; u8 connection_type; #define I40E_AQ_CONN_TYPE_REGULAR 0x1 #define I40E_AQ_CONN_TYPE_DEFAULT 0x2 #define I40E_AQ_CONN_TYPE_CASCADED 0x3 __le16 scheduler_id; __le16 element_info; }; I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); /* Get Switch Configuration (indirect 0x0200) * an array of elements are returned in the response buffer * the first in the array is the header, remainder are elements */ struct i40e_aqc_get_switch_config_resp { struct i40e_aqc_get_switch_config_header_resp header; struct i40e_aqc_switch_config_element_resp element[1]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); /* Add Statistics (direct 0x0201) * Remove Statistics (direct 0x0202) */ struct i40e_aqc_add_remove_statistics { __le16 seid; __le16 vlan; __le16 stat_index; u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { __le16 command_flags; #define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
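 *
 * (Aside, illustrative sketch only: the Get Switch Configuration response
 * buffer above is parsed as the header followed by num_reported elements,
 * roughly
 *
 *	struct i40e_aqc_get_switch_config_resp *sw_config = buf;
 *	u16 i, num = LE16_TO_CPU(sw_config->header.num_reported);
 *
 *	for (i = 0; i < num; i++)
 *		handle_element(&sw_config->element[i]);
 *
 * where LE16_TO_CPU comes from i40e_osdep.h and handle_element() stands
 * in for driver code keying off element_type and seid.)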
*/ #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 __le16 bad_frame_vsi; #define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 #define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF __le16 default_seid; /* reserved for command */ u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); /* Get Switch Resource Allocation (indirect 0x0204) */ struct i40e_aqc_get_switch_resource_alloc { u8 num_entries; /* reserved for command */ u8 reserved[7]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { u8 resource_type; #define I40E_AQ_RESOURCE_TYPE_VEB 0x0 #define I40E_AQ_RESOURCE_TYPE_VSI 0x1 #define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 #define I40E_AQ_RESOURCE_TYPE_STAG 0x3 #define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 #define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 #define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 #define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 #define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 #define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 #define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA #define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB #define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC #define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD #define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF #define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 #define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 #define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 #define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 u8 reserved1; __le16 guaranteed; __le16 total; __le16 used; __le16 total_unalloced; u8 reserved2[6]; }; I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); /* Set Switch Configuration (direct 0x0205) */ struct i40e_aqc_set_switch_config { __le16 flags; /* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 #define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004 __le16 valid_flags; /* The ethertype in switch_tag is dropped on ingress and used * internally by the switch. Set this to zero for the default * of 0x88a8 (802.1ad). Should be zero for firmware API * versions lower than 1.7. */ __le16 switch_tag; /* The ethertypes in first_tag and second_tag are used to * match the outer and inner VLAN tags (respectively) when HW * double VLAN tagging is enabled via the set port parameters * AQ command. Otherwise these are both ignored. Set them to * zero for their defaults of 0x8100 (802.1Q). Should be zero * for firmware API versions lower than 1.7. 
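 *
 * As a sketch (illustrative; i40e_aq_set_switch_config() in i40e_common.c
 * wraps this descriptor), leaving every tag at its default looks like
 *
 *	u16 flags = I40E_AQ_SET_SWITCH_CFG_L2_FILTER;
 *	u16 valid_flags = I40E_AQ_SET_SWITCH_CFG_L2_FILTER;
 *
 *	i40e_aq_set_switch_config(hw, flags, valid_flags, 0, NULL);
 *
 * with switch_tag/first_tag/second_tag all zero as described above.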
*/ __le16 first_tag; __le16 second_tag; /* Next byte is split into following: * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0 * Bit 6 : 0 : Destination Port, 1: source port * Bit 5..4 : L4 type * 0: rsvd * 1: TCP * 2: UDP * 3: Both TCP and UDP * Bits 3:0 Mode * 0: default mode * 1: L4 port only mode * 2: non-tunneled mode * 3: tunneled mode */ #define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80 #define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40 #define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00 #define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10 #define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20 #define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30 #define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00 #define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01 #define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02 #define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03 u8 mode; u8 rsvd5[5]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); /* Read Receive control registers (direct 0x0206) * Write Receive control registers (direct 0x0207) * used for accessing Rx control registers that can be * slow and need special handling when under high Rx load */ struct i40e_aqc_rx_ctl_reg_read_write { __le32 reserved1; __le32 address; __le32 reserved2; __le32 value; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) * * Update VSI (indirect 0x211) * uses the same data structure as Add VSI * * Get VSI (indirect 0x0212) * uses the same completion and data structure as Add VSI */ struct i40e_aqc_add_get_update_vsi { __le16 uplink_seid; u8 connection_type; #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 #define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 #define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 u8 reserved1; u8 vf_id; u8 reserved2; __le16 vsi_flags; #define I40E_AQ_VSI_TYPE_SHIFT 0x0 #define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) #define I40E_AQ_VSI_TYPE_VF 0x0 #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 #define I40E_AQ_VSI_TYPE_PF 0x2 #define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 #define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); struct i40e_aqc_add_get_update_vsi_completion { __le16 seid; __le16 vsi_number; __le16 vsi_used; __le16 vsi_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); struct i40e_aqc_vsi_properties_data { /* first 96 byte are written by SW */ __le16 valid_sections; #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 #define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 #define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 #define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 #define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ __le16 switch_id; /* 12bit id combined with flags below */ #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) #define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 u8 sw_reserved[2]; /* security section */ u8 sec_flags; #define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 u8 sec_reserved; /* VLAN section */ __le16 pvid; /* VLANS include 
priority bits */ __le16 fcoe_pvid; u8 port_vlan_flags; #define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ I40E_AQ_VSI_PVLAN_MODE_SHIFT) #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 #define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ I40E_AQ_VSI_PVLAN_EMOD_SHIFT) #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 #define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 u8 pvlan_reserved[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ #define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 #define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 #define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 #define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 #define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 #define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 #define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 #define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 #define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ __le16 cas_pv_tag; u8 cas_pv_flags; #define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 #define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) #define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 #define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 #define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 #define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 #define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 #define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 u8 cas_pv_reserved; /* queue mapping section */ __le16 mapping_flags; #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 __le16 queue_mapping[16]; #define I40E_AQ_VSI_QUEUE_SHIFT 0x0 #define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) __le16 tc_mapping[8]; #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 #define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 #define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; #define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 #define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 #define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 #define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ u8 up_enable_bits; u8 sched_reserved; /* outer up section */ __le32 outer_up_table; /* same structure and defines as ingress tbl */ u8 cmd_reserved[8]; /* last 32 bytes are written by FW */ __le16 qs_handle[8]; #define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF __le16 stat_counter_idx; __le16 sched_id; u8 resp_reserved[12]; }; I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); /* Add Port Virtualizer 
(direct 0x0220) * also used for update PV (direct 0x0221) but only flags are used * (IS_CTRL_PORT only works on add PV) */ struct i40e_aqc_add_update_pv { __le16 command_flags; #define I40E_AQC_PV_FLAG_PV_TYPE 0x1 #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 #define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 __le16 uplink_seid; __le16 connected_seid; u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ __le16 pv_seid; #define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 #define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 #define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 #define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); /* Get PV Params (direct 0x0222) * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_pv_params_completion { __le16 seid; __le16 default_stag; __le16 pv_flags; /* same flags as add_pv */ #define I40E_AQC_GET_PV_PV_TYPE 0x1 #define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 #define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 u8 reserved[8]; __le16 default_port_seid; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); /* Add VEB (direct 0x0230) */ struct i40e_aqc_add_veb { __le16 uplink_seid; __le16 downlink_seid; __le16 veb_flags; #define I40E_AQC_ADD_VEB_FLOATING 0x1 #define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 #define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 #define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ #define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); struct i40e_aqc_add_veb_completion { u8 reserved[6]; __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ __le16 veb_seid; #define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 #define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 #define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 #define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 __le16 statistic_index; __le16 vebs_used; __le16 vebs_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); /* Get VEB Parameters (direct 0x0232) * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_veb_parameters_completion { __le16 seid; __le16 switch_id; __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ __le16 statistic_index; __le16 vebs_used; __le16 vebs_free; u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); /* Delete Element (direct 0x0243) * uses the generic i40e_aqc_switch_seid */ /* Add MAC-VLAN (indirect 0x0250) */ /* used for the command for most vlan commands */ struct i40e_aqc_macvlan { __le16 num_addresses; __le16 seid[3]; #define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); /* indirect data for command and response */ struct i40e_aqc_add_macvlan_element_data { u8 mac_addr[6]; __le16 vlan_tag; __le16 flags; #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 #define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; 
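
/* Illustrative sketch (not part of the layout): one filled element sent
 * through the i40e_aq_add_macvlan() wrapper in i40e_common.c; mac is
 * caller-supplied.
 */
#if 0	/* illustrative only */
	struct i40e_aqc_add_macvlan_element_data e;

	memset(&e, 0, sizeof(e));
	memcpy(e.mac_addr, mac, sizeof(e.mac_addr));
	e.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
			      I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
	i40e_aq_add_macvlan(hw, vsi_seid, &e, 1, NULL);
#endif
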
#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ u8 match_method; #define I40E_AQC_MM_PERFECT_MATCH 0x01 #define I40E_AQC_MM_HASH_MATCH 0x02 #define I40E_AQC_MM_ERR_NO_RES 0xFF u8 reserved1[3]; }; struct i40e_aqc_add_remove_macvlan_completion { __le16 perfect_mac_used; __le16 perfect_mac_free; __le16 unicast_hash_free; __le16 multicast_hash_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); /* Remove MAC-VLAN (indirect 0x0251) * uses i40e_aqc_macvlan for the descriptor * data points to an array of num_addresses of elements */ struct i40e_aqc_remove_macvlan_element_data { u8 mac_addr[6]; __le16 vlan_tag; u8 flags; #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 #define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 #define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 u8 reserved[3]; /* reply section */ u8 error_code; #define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 #define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF u8 reply_reserved[3]; }; /* Add VLAN (indirect 0x0252) * Remove VLAN (indirect 0x0253) * use the generic i40e_aqc_macvlan for the command */ struct i40e_aqc_add_remove_vlan_element_data { __le16 vlan_tag; u8 vlan_flags; /* flags for add VLAN */ #define I40E_AQC_ADD_VLAN_LOCAL 0x1 #define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 #define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) #define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 #define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 #define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 #define I40E_AQC_VLAN_PTYPE_SHIFT 3 #define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) #define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 #define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 #define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 #define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 /* flags for remove VLAN */ #define I40E_AQC_REMOVE_VLAN_ALL 0x1 u8 reserved; u8 result; /* flags for add VLAN */ #define I40E_AQC_ADD_VLAN_SUCCESS 0x0 #define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE #define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF /* flags for remove VLAN */ #define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 #define I40E_AQC_REMOVE_VLAN_FAIL 0xFF u8 reserved1[3]; }; struct i40e_aqc_add_remove_vlan_completion { u8 reserved[4]; __le16 vlans_used; __le16 vlans_free; __le32 addr_high; __le32 addr_low; }; /* Set VSI Promiscuous Modes (direct 0x0254) */ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 promiscuous_flags; __le16 valid_flags; /* flags used for both fields above */ #define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 #define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 #define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; #define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); /* Add S/E-tag command (direct 0x0255) * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_add_tag { __le16 flags; #define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 __le16 seid; #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; __le16 queue_number; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); struct 
i40e_aqc_add_remove_tag_completion { u8 reserved[12]; __le16 tags_used; __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); /* Remove S/E-tag command (direct 0x0256) * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_remove_tag { __le16 seid; #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); /* Add multicast E-Tag (direct 0x0257) * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields * and no external data */ struct i40e_aqc_add_remove_mcast_etag { __le16 pv_seid; __le16 etag; u8 num_unicast_etags; u8 reserved[3]; __le32 addr_high; /* address of array of 2-byte s-tags */ __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); struct i40e_aqc_add_remove_mcast_etag_completion { u8 reserved[4]; __le16 mcast_etags_used; __le16 mcast_etags_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { __le16 seid; #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) __le16 old_tag; __le16 new_tag; u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); struct i40e_aqc_update_tag_completion { u8 reserved[12]; __le16 tags_used; __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); /* Add Control Packet filter (direct 0x025A) * Remove Control Packet filter (direct 0x025B) * uses the i40e_aqc_add_oveb_cloud, * and the generic direct completion structure */ struct i40e_aqc_add_remove_control_packet_filter { u8 mac[6]; __le16 etype; __le16 flags; #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 __le16 seid; #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) __le16 queue; u8 reserved[2]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); struct i40e_aqc_add_remove_control_packet_filter_completion { __le16 mac_etype_used; __le16 etype_used; __le16 mac_etype_free; __le16 etype_free; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); /* Add Cloud filters (indirect 0x025C) * Remove Cloud filters (indirect 0x025D) * uses the i40e_aqc_add_remove_cloud_filters, * and the generic indirect completion structure */ struct i40e_aqc_add_remove_cloud_filters { u8 num_filters; u8 reserved; __le16 seid; #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) u8 big_buffer_flag; #define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1 #define I40E_AQC_ADD_CLOUD_CMD_BB 1 u8 reserved2[3]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); struct i40e_aqc_cloud_filters_element_data { u8 outer_mac[6]; u8 inner_mac[6]; __le16 inner_vlan; union { struct { u8 reserved[12]; u8 data[4]; } v4; struct { u8 data[16]; } v6; struct { __le16 data[8]; } raw_v6; } ipaddr; __le16 flags; #define 
I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ /* 0x0001 reserved */ /* 0x0002 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 /* 0x0005 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 /* 0x0007 reserved */ /* 0x0008 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C /* 0x000D reserved */ /* 0x000E reserved */ /* 0x000F reserved */ /* 0x0010 to 0x0017 is for custom filters */ #define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ #define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ #define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 #define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; __le16 queue_number; #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 #define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) u8 reserved2[14]; /* response section */ u8 allocation_result; #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 #define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF u8 response_reserved[7]; }; /* i40e_aqc_add_rm_cloud_filt_elem_ext is used when * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. 
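 *
 * (Illustrative flag composition, not additional API: a GENEVE filter on
 * inner MAC + VLAN + tenant id would combine the defines above roughly as
 *
 *	e.flags = CPU_TO_LE16(I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID |
 *			      (I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE <<
 *			       I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
 *	e.tenant_id = CPU_TO_LE32(vni);
 *
 * with e an i40e_aqc_cloud_filters_element_data and vni caller-supplied.)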
*/ struct i40e_aqc_add_rm_cloud_filt_elem_ext { struct i40e_aqc_cloud_filters_element_data element; u16 general_fields[32]; #define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); /* i40e_aqc_cloud_filters_element_bb is used when * I40E_AQC_CLOUD_CMD_BB flag is set. */ struct i40e_aqc_cloud_filters_element_bb { struct i40e_aqc_cloud_filters_element_data element; u16 general_fields[32]; #define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 }; I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); struct i40e_aqc_remove_cloud_filters_completion { __le16 perfect_ovlan_used; __le16 perfect_ovlan_free; __le16 vlan_used; __le16 
vlan_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); /* Replace filter Command 0x025F * uses the i40e_aqc_replace_cloud_filters, * and the generic indirect completion structure */ struct i40e_filter_data { u8 filter_type; u8 input[3]; }; I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); struct i40e_aqc_replace_cloud_filters_cmd { u8 valid_flags; #define I40E_AQC_REPLACE_L1_FILTER 0x0 #define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 #define I40E_AQC_GET_CLOUD_FILTERS 0x2 #define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 #define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 u8 old_filter_type; u8 new_filter_type; u8 tr_bit; u8 tr_bit2; u8 reserved[3]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); struct i40e_aqc_replace_cloud_filters_cmd_buf { u8 data[32]; /* Filter type INPUT codes*/ #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL) /* Field Vector offsets */ #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 /* big FLU */ #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 /* big FLU */ #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 struct i40e_filter_data filters[8]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); /* Add Mirror Rule (indirect or direct 0x0260) * Delete Mirror Rule (indirect or direct 0x0261) * note: some rule types (4,5) do not use an external buffer. * take care to set the flags correctly. 
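 *
 * A sketch (illustrative; i40e_aq_add_mirrorrule() in i40e_common.c wraps
 * this descriptor): mirroring two VLANs supplies a buffer of __le16 ids,
 * whereas rule types 4/5 pass count 0 and no buffer:
 *
 *	__le16 vlans[2] = { CPU_TO_LE16(10), CPU_TO_LE16(20) };
 *	u16 rule_id, rules_used, rules_free;
 *
 *	i40e_aq_add_mirrorrule(hw, sw_seid,
 *	    I40E_AQC_MIRROR_RULE_TYPE_VLAN, dest_vsi_seid, 2, vlans,
 *	    NULL, &rule_id, &rules_used, &rules_free);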
*/ struct i40e_aqc_add_delete_mirror_rule { __le16 seid; __le16 rule_type; #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 __le16 num_entries; __le16 destination; /* VSI for add, rule id for delete */ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); struct i40e_aqc_add_delete_mirror_rule_completion { u8 reserved[2]; __le16 rule_id; /* only used on add */ __le16 mirror_rules_used; __le16 mirror_rules_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); /* DCB 0x03xx*/ /* PFC Ignore (direct 0x0301) * the command and response use the same descriptor structure */ struct i40e_aqc_pfc_ignore { u8 tc_bitmap; u8 command_flags; /* unused on response */ #define I40E_AQC_PFC_IGNORE_SET 0x80 #define I40E_AQC_PFC_IGNORE_CLEAR 0x0 u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); /* DCB Update (direct 0x0302) uses the i40e_aq_desc structure * with no parameters */ /* TX scheduler 0x04xx */ /* Almost all the indirect commands use * this generic struct to pass the SEID in param0 */ struct i40e_aqc_tx_sched_ind { __le16 vsi_seid; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); /* Several commands respond with a set of queue set handles */ struct i40e_aqc_qs_handles_resp { __le16 qs_handles[8]; }; /* Configure VSI BW limits (direct 0x0400) */ struct i40e_aqc_configure_vsi_bw_limit { __le16 vsi_seid; u8 reserved[2]; __le16 credit; u8 reserved1[2]; u8 max_credit; /* 0-3, limit = 2^max */ u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); /* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_ets_sla_bw_data { u8 tc_valid_bits; u8 reserved[15]; __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ __le16 tc_bw_max[2]; u8 reserved1[28]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data); /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_tc_bw_data { u8 tc_valid_bits; u8 reserved[3]; u8 tc_bw_credits[8]; u8 reserved1[4]; __le16 qs_handles[8]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); /* Query vsi bw configuration (indirect 0x0408) */ struct i40e_aqc_query_vsi_bw_config_resp { u8 tc_valid_bits; u8 tc_suspended_bits; u8 reserved[14]; __le16 qs_handles[8]; u8 reserved1[4]; __le16 port_bw_limit; u8 reserved2[2]; u8 max_bw; /* 0-3, limit = 2^max */ u8 reserved3[23]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ struct i40e_aqc_query_vsi_ets_sla_config_resp { u8 tc_valid_bits; u8 reserved[3]; u8 share_credits[8]; __le16 credits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ __le16 tc_bw_max[2]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ struct 
i40e_aqc_configure_switching_comp_bw_limit { __le16 seid; u8 reserved[2]; __le16 credit; u8 reserved1[2]; u8 max_bw; /* 0-3, limit = 2^max */ u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); /* Enable Physical Port ETS (indirect 0x0413) * Modify Physical Port ETS (indirect 0x0414) * Disable Physical Port ETS (indirect 0x0415) */ struct i40e_aqc_configure_switching_comp_ets_data { u8 reserved[4]; u8 tc_valid_bits; u8 seepage; #define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 u8 tc_strict_priority_flags; u8 reserved1[17]; u8 tc_bw_share_credits[8]; u8 reserved2[96]; }; I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { u8 tc_valid_bits; u8 reserved[15]; __le16 tc_bw_credit[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ __le16 tc_bw_max[2]; u8 reserved1[28]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data); /* Configure Switching Component Bandwidth Allocation per Tc * (indirect 0x0417) */ struct i40e_aqc_configure_switching_comp_bw_config_data { u8 tc_valid_bits; u8 reserved[2]; u8 absolute_credits; /* bool */ u8 tc_bw_share_credits[8]; u8 reserved1[20]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); /* Query Switching Component Configuration (indirect 0x0418) */ struct i40e_aqc_query_switching_comp_ets_config_resp { u8 tc_valid_bits; u8 reserved[35]; __le16 port_bw_limit; u8 reserved1[2]; u8 tc_bw_max; /* 0-3, limit = 2^max */ u8 reserved2[23]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ struct i40e_aqc_query_port_ets_config_resp { u8 reserved[4]; u8 tc_valid_bits; u8 reserved1; u8 tc_strict_priority_bits; u8 reserved2; u8 tc_bw_share_credits[8]; __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ __le16 tc_bw_max[2]; u8 reserved3[32]; }; I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); /* Query Switching Component Bandwidth Allocation per Traffic Type * (indirect 0x041A) */ struct i40e_aqc_query_switching_comp_bw_config_resp { u8 tc_valid_bits; u8 reserved[2]; u8 absolute_credits_enable; /* bool */ u8 tc_bw_share_credits[8]; __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ __le16 tc_bw_max[2]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); /* Suspend/resume port TX traffic * (direct 0x041B and 0x041C) uses the generic SEID struct */ /* Configure partition BW * (indirect 0x041D) */ struct i40e_aqc_configure_partition_bw_data { __le16 pf_valid_bits; u8 min_bw[16]; /* guaranteed bandwidth */ u8 max_bw[16]; /* bandwidth limit */ }; I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); /* Get and set the active HMC resource profile and status. 
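 *
 * (Aside on the TX scheduler section above, illustrative only: relative
 * per-TC credits go through 0x0407, e.g. an even TC0/TC1 split via the
 * i40e_aq_config_vsi_tc_bw() wrapper in i40e_common.c:
 *
 *	struct i40e_aqc_configure_vsi_tc_bw_data bw;
 *
 *	memset(&bw, 0, sizeof(bw));
 *	bw.tc_valid_bits = 0x3;
 *	bw.tc_bw_credits[0] = 50;
 *	bw.tc_bw_credits[1] = 50;
 *	i40e_aq_config_vsi_tc_bw(hw, vsi_seid, &bw, NULL);
 *
 * firmware then writes the queue-set handles back into qs_handles[].)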
* (direct 0x0500) and (direct 0x0501) */ struct i40e_aq_get_set_hmc_resource_profile { u8 pm_profile; u8 pe_vf_enabled; u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); enum i40e_aq_hmc_profile { /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ I40E_HMC_PROFILE_DEFAULT = 1, I40E_HMC_PROFILE_FAVOR_VF = 2, I40E_HMC_PROFILE_EQUAL = 3, }; /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ #define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 #define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 enum i40e_aq_phy_type { I40E_PHY_TYPE_SGMII = 0x0, I40E_PHY_TYPE_1000BASE_KX = 0x1, I40E_PHY_TYPE_10GBASE_KX4 = 0x2, I40E_PHY_TYPE_10GBASE_KR = 0x3, I40E_PHY_TYPE_40GBASE_KR4 = 0x4, I40E_PHY_TYPE_XAUI = 0x5, I40E_PHY_TYPE_XFI = 0x6, I40E_PHY_TYPE_SFI = 0x7, I40E_PHY_TYPE_XLAUI = 0x8, I40E_PHY_TYPE_XLPPI = 0x9, I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, I40E_PHY_TYPE_10GBASE_AOC = 0xC, I40E_PHY_TYPE_40GBASE_AOC = 0xD, I40E_PHY_TYPE_UNRECOGNIZED = 0xE, I40E_PHY_TYPE_UNSUPPORTED = 0xF, I40E_PHY_TYPE_100BASE_TX = 0x11, I40E_PHY_TYPE_1000BASE_T = 0x12, I40E_PHY_TYPE_10GBASE_T = 0x13, I40E_PHY_TYPE_10GBASE_SR = 0x14, I40E_PHY_TYPE_10GBASE_LR = 0x15, I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, I40E_PHY_TYPE_10GBASE_CR1 = 0x17, I40E_PHY_TYPE_40GBASE_CR4 = 0x18, I40E_PHY_TYPE_40GBASE_SR4 = 0x19, I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, I40E_PHY_TYPE_1000BASE_SX = 0x1B, I40E_PHY_TYPE_1000BASE_LX = 0x1C, I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, I40E_PHY_TYPE_25GBASE_KR = 0x1F, I40E_PHY_TYPE_25GBASE_CR = 0x20, I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, I40E_PHY_TYPE_25GBASE_AOC = 0x23, I40E_PHY_TYPE_25GBASE_ACC = 0x24, + I40E_PHY_TYPE_2_5GBASE_T = 0x30, + I40E_PHY_TYPE_5GBASE_T = 0x31, I40E_PHY_TYPE_MAX, I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, I40E_PHY_TYPE_EMPTY = 0xFE, I40E_PHY_TYPE_DEFAULT = 0xFF, }; #define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \ BIT_ULL(I40E_PHY_TYPE_XAUI) | \ BIT_ULL(I40E_PHY_TYPE_XFI) | \ BIT_ULL(I40E_PHY_TYPE_SFI) | \ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \ - BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC)) + BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \ + BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \ + BIT_ULL(I40E_PHY_TYPE_5GBASE_T)) +#define 
I40E_LINK_SPEED_2_5GB_SHIFT 0x0 #define I40E_LINK_SPEED_100MB_SHIFT 0x1 #define I40E_LINK_SPEED_1000MB_SHIFT 0x2 #define I40E_LINK_SPEED_10GB_SHIFT 0x3 #define I40E_LINK_SPEED_40GB_SHIFT 0x4 #define I40E_LINK_SPEED_20GB_SHIFT 0x5 #define I40E_LINK_SPEED_25GB_SHIFT 0x6 +#define I40E_LINK_SPEED_5GB_SHIFT 0x7 enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT), + I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT), I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT), I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT), }; struct i40e_aqc_module_desc { u8 oui[3]; u8 reserved1; u8 part_number[16]; u8 revision[4]; u8 reserved2[8]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); struct i40e_aq_get_phy_abilities_resp { __le32 phy_type; /* bitmap using the above enum for offsets */ u8 link_speed; /* bitmap using the above enum bit patterns */ u8 abilities; #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 #define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 #define I40E_AQ_PHY_LINK_ENABLED 0x08 #define I40E_AQ_PHY_AN_ENABLED 0x10 #define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 #define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 #define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 __le16 eee_capability; #define I40E_AQ_EEE_AUTO 0x0001 #define I40E_AQ_EEE_100BASE_TX 0x0002 #define I40E_AQ_EEE_1000BASE_T 0x0004 #define I40E_AQ_EEE_10GBASE_T 0x0008 #define I40E_AQ_EEE_1000BASE_KX 0x0010 #define I40E_AQ_EEE_10GBASE_KX4 0x0020 #define I40E_AQ_EEE_10GBASE_KR 0x0040 +#define I40E_AQ_EEE_2_5GBASE_T 0x0100 +#define I40E_AQ_EEE_5GBASE_T 0x0200 __le32 eeer_val; u8 d3_lpan; #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 u8 phy_type_ext; #define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01 #define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 #define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 #define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 +#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40 +#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80 u8 fec_cfg_curr_mod_ext_info; #define I40E_AQ_ENABLE_FEC_KR 0x01 #define I40E_AQ_ENABLE_FEC_RS 0x02 #define I40E_AQ_REQUEST_FEC_KR 0x04 #define I40E_AQ_REQUEST_FEC_RS 0x08 #define I40E_AQ_ENABLE_FEC_AUTO 0x10 #define I40E_AQ_FEC #define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 #define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 u8 ext_comp_code; u8 phy_id[4]; u8 module_type[3]; u8 qualified_module_count; #define I40E_AQ_PHY_MAX_QMS 16 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; }; I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); /* Set PHY Config (direct 0x0601) */ struct i40e_aq_set_phy_config { /* same bits as above in all */ __le32 phy_type; u8 link_speed; u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ #define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 __le16 eee_capability; __le32 eeer; u8 low_power_ctrl; u8 phy_type_ext; u8 fec_config; #define I40E_AQ_SET_FEC_ABILITY_KR BIT(0) #define I40E_AQ_SET_FEC_ABILITY_RS BIT(1) #define I40E_AQ_SET_FEC_REQUEST_KR BIT(2) #define I40E_AQ_SET_FEC_REQUEST_RS BIT(3) #define I40E_AQ_SET_FEC_AUTO BIT(4) #define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0 #define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << 
I40E_AQ_PHY_FEC_CONFIG_SHIFT) u8 reserved; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); /* Set MAC Config command data structure (direct 0x0603) */ struct i40e_aq_set_mac_config { __le16 max_frame_size; u8 params; #define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 #define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 #define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 #define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 #define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 #define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 #define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 #define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 #define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 #define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 u8 tx_timer_priority; /* bitmap */ __le16 tx_timer_value; __le16 fc_refresh_threshold; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); /* Restart Auto-Negotiation (direct 0x605) */ struct i40e_aqc_set_link_restart_an { u8 command; #define I40E_AQ_PHY_RESTART_AN 0x02 #define I40E_AQ_PHY_LINK_ENABLE 0x04 u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { __le16 command_flags; /* only field set on command */ #define I40E_AQ_LSE_MASK 0x3 #define I40E_AQ_LSE_NOP 0x0 #define I40E_AQ_LSE_DISABLE 0x2 #define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ #define I40E_AQ_LSE_IS_ENABLED 0x1 u8 phy_type; /* i40e_aq_phy_type */ u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; #define I40E_AQ_LINK_UP 0x01 /* obsolete */ #define I40E_AQ_LINK_UP_FUNCTION 0x01 #define I40E_AQ_LINK_FAULT 0x02 #define I40E_AQ_LINK_FAULT_TX 0x04 #define I40E_AQ_LINK_FAULT_RX 0x08 #define I40E_AQ_LINK_FAULT_REMOTE 0x10 #define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 #define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; #define I40E_AQ_AN_COMPLETED 0x01 #define I40E_AQ_LP_AN_ABILITY 0x02 #define I40E_AQ_PD_FAULT 0x04 #define I40E_AQ_FEC_EN 0x08 #define I40E_AQ_PHY_LOW_POWER 0x10 #define I40E_AQ_LINK_PAUSE_TX 0x20 #define I40E_AQ_LINK_PAUSE_RX 0x40 #define I40E_AQ_QUALIFIED_MODULE 0x80 u8 ext_info; #define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 #define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 #define I40E_AQ_LINK_TX_SHIFT 0x02 #define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) #define I40E_AQ_LINK_TX_ACTIVE 0x00 #define I40E_AQ_LINK_TX_DRAINED 0x01 #define I40E_AQ_LINK_TX_FLUSHED 0x03 #define I40E_AQ_LINK_FORCED_40G 0x10 /* 25G Error Codes */ #define I40E_AQ_25G_NO_ERR 0X00 #define I40E_AQ_25G_NOT_PRESENT 0X01 #define I40E_AQ_25G_NVM_CRC_ERR 0X02 #define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 #define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 #define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ /* Since firmware API 1.7 loopback field keeps power class info as well */ #define I40E_AQ_LOOPBACK_MASK 0x07 #define I40E_AQ_PWR_CLASS_SHIFT_LB 6 #define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) __le16 max_frame_size; u8 config; #define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 #define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 union { struct { u8 power_desc; #define I40E_AQ_LINK_POWER_CLASS_1 0x00 #define 
I40E_AQ_LINK_POWER_CLASS_2 0x01 #define I40E_AQ_LINK_POWER_CLASS_3 0x02 #define I40E_AQ_LINK_POWER_CLASS_4 0x03 #define I40E_AQ_PWR_CLASS_MASK 0x03 u8 reserved[4]; }; struct { u8 link_type[4]; u8 link_type_ext; }; }; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); /* Set event mask command (direct 0x613) */ struct i40e_aqc_set_phy_int_mask { u8 reserved[8]; __le16 event_mask; #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 #define I40E_AQ_EVENT_MEDIA_NA 0x0004 #define I40E_AQ_EVENT_LINK_FAULT 0x0008 #define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 #define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 #define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 #define I40E_AQ_EVENT_AN_COMPLETED 0x0080 #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 #define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 u8 reserved1[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); /* Get Local AN advt register (direct 0x0614) * Set Local AN advt register (direct 0x0615) * Get Link Partner AN advt register (direct 0x0616) */ struct i40e_aqc_an_advt_reg { __le32 local_an_reg0; __le16 local_an_reg1; u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); /* Set Loopback mode (0x0618) */ struct i40e_aqc_set_lb_mode { u8 lb_level; #define I40E_AQ_LB_NONE 0 #define I40E_AQ_LB_MAC 1 #define I40E_AQ_LB_SERDES 2 #define I40E_AQ_LB_PHY_INT 3 #define I40E_AQ_LB_PHY_EXT 4 #define I40E_AQ_LB_BASE_T_PCS 5 #define I40E_AQ_LB_BASE_T_EXT 6 #define I40E_AQ_LB_PHY_LOCAL 0x01 #define I40E_AQ_LB_PHY_REMOTE 0x02 #define I40E_AQ_LB_MAC_LOCAL 0x04 u8 lb_type; #define I40E_AQ_LB_LOCAL 0 #define I40E_AQ_LB_FAR 0x01 u8 speed; #define I40E_AQ_LB_SPEED_NONE 0 #define I40E_AQ_LB_SPEED_1G 1 #define I40E_AQ_LB_SPEED_10G 2 #define I40E_AQ_LB_SPEED_40G 3 #define I40E_AQ_LB_SPEED_20G 4 u8 force_speed; u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) */ struct i40e_aqc_set_phy_debug { u8 command_flags; #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 /* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 /* Disable link manageability on all ports needs both bits 4 and 5 */ #define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20 u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_INTERNAL = 0x1, I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; +#pragma pack(1) /* Run PHY Activity (0x0626) */ struct i40e_aqc_run_phy_activity { - __le16 activity_id; - u8 flags; - u8 reserved1; - __le32 control; - __le32 data; - u8 reserved2[4]; + u8 cmd_flags; + __le16 activity_id; +#define I40E_AQ_RUN_PHY_ACT_ID_USR_DFND 0x10 + u8 reserved; + union { + struct { + __le32 dnl_opcode; +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR 0x801a +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT 0x801b +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR 0x1801b + __le32 data; + u8 reserved2[4]; + } cmd; + struct { + __le32 cmd_status; +#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC 0x4 +#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK 0xFFFF + __le32 data0; + __le32 data1; + } resp; + } params; }; +#pragma pack() I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); /* Set PHY Register command (0x0628) */ /* Get PHY Register command 
(0x0629) */ struct i40e_aqc_phy_register_access { u8 phy_interface; #define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 u8 dev_addres; u8 cmd_flags; #define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01 #define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02 #define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2 #define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) u8 reserved1; __le32 reg_address; __le32 reg_value; u8 reserved2[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) */ struct i40e_aqc_nvm_update { u8 command_flags; #define I40E_AQ_NVM_LAST_CMD 0x01 #define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 #define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 #define I40E_AQ_NVM_FLASH_ONLY 0x80 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 #define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 u8 module_pointer; __le16 length; __le32 offset; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { __le16 cmd_flags; #define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 #define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 #define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 __le16 element_count; __le16 element_id; /* Feature/field ID */ __le16 element_id_msw; /* MSWord of field ID */ __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); /* NVM Config Write (indirect 0x0705) */ struct i40e_aqc_nvm_config_write { __le16 cmd_flags; __le16 element_count; u8 reserved[4]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) #define I40E_AQ_ANVM_FEATURE 0 #define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 #define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 #define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 __le16 feature_options; __le16 feature_selection; }; I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); struct i40e_aqc_nvm_config_data_immediate_field { __le32 field_id; __le32 field_value; __le16 field_options; __le16 reserved; }; I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); /* OEM Post Update (indirect 0x0720) * no command data struct used */ struct i40e_aqc_nvm_oem_post_update { #define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 u8 sel_data; u8 reserved[7]; }; I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); struct i40e_aqc_nvm_oem_post_update_buffer { u8 str_len; u8 dev_addr; __le16 eeprom_addr; u8 data[36]; }; I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); /* Thermal Sensor (indirect 0x0721) * read or set thermal sensor configs and values * takes a sensor and command specific data buffer, not detailed here */ struct i40e_aqc_thermal_sensor { u8 sensor_action; #define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 #define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 #define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 u8 reserved[7]; 
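The MDIO interface-number field in i40e_aqc_phy_register_access above is a two-bit value packed behind a separate "set MDIO interface" valid flag, which is easy to get wrong. A minimal host-side sketch of the packing; the helper name is illustrative, and only the macro values are taken from the header:

#include <stdio.h>
#include <stdint.h>

typedef uint8_t u8;

/* Flag layout copied from struct i40e_aqc_phy_register_access above */
#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER	0x02
#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT	2
#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK \
	(0x3 << I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)

/* Illustrative helper: pack an MDIO interface number (0-3) into the
 * cmd_flags byte and set the "use this MDIO interface" valid bit. */
static u8 pack_mdio_if_number(u8 mdio_if)
{
	u8 cmd_flags = I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER;

	cmd_flags |= (mdio_if << I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
		     I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK;
	return cmd_flags;
}

int main(void)
{
	/* MDIO interface 2 -> 0x02 | (2 << 2) = 0x0a */
	printf("cmd_flags = 0x%02x\n", pack_mdio_if_number(2));
	return 0;
}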
__le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) */ struct i40e_aqc_pf_vf_message { __le32 id; u8 reserved[4]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); /* Alternate structure */ /* Direct write (direct 0x0900) * Direct read (direct 0x0902) */ struct i40e_aqc_alternate_write { __le32 address0; __le32 data0; __le32 address1; __le32 data1; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); /* Indirect write (indirect 0x0901) * Indirect read (indirect 0x0903) */ struct i40e_aqc_alternate_ind_write { __le32 address; __le32 length; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); /* Done alternate write (direct 0x0904) * uses i40e_aq_desc */ struct i40e_aqc_alternate_write_done { __le16 cmd_flags; #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { __le32 mode; #define I40E_AQ_ALTERNATE_MODE_NONE 0 #define I40E_AQ_ALTERNATE_MODE_OEM 1 u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); /* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ /* async events 0x10xx */ /* Lan Queue Overflow Event (direct, 0x1001) */ struct i40e_aqc_lan_overflow { __le32 prtdcb_rupto; __le32 otx_ctl; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); /* Get LLDP MIB (indirect 0x0A00) */ struct i40e_aqc_lldp_get_mib { u8 type; u8 reserved1; #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 #define I40E_AQ_LLDP_MIB_LOCAL 0x0 #define I40E_AQ_LLDP_MIB_REMOTE 0x1 #define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 #define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 #define I40E_AQ_LLDP_TX_SHIFT 0x4 #define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ __le16 local_len; __le16 remote_len; u8 reserved2[2]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); /* Configure LLDP MIB Change Event (direct 0x0A01) * also used for the event (with type in the command field) */ struct i40e_aqc_lldp_update_mib { u8 command; #define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 u8 reserved[7]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); /* Add LLDP TLV (indirect 0x0A02) * Delete LLDP TLV (indirect 0x0A04) */ struct i40e_aqc_lldp_add_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ u8 reserved1[1]; __le16 len; u8 reserved2[4]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); /* Update LLDP TLV (indirect 0x0A03) */ struct i40e_aqc_lldp_update_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ u8 reserved; __le16 old_len; __le16 new_offset; __le16 new_len; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { u8 command; #define I40E_AQ_LLDP_AGENT_STOP 0x0 #define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 #define 
I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2 u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); /* Start LLDP (direct 0x0A06) */ struct i40e_aqc_lldp_start { u8 command; #define I40E_AQ_LLDP_AGENT_START 0x1 #define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2 u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); /* Set DCB (direct 0x0303) */ struct i40e_aqc_set_dcb_parameters { u8 command; #define I40E_AQ_DCB_SET_AGENT 0x1 #define I40E_DCB_VALID 0x1 u8 valid_flags; u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); /* Get CEE DCBX Oper Config (0x0A07) * uses the generic descriptor struct * returns below as indirect response */ #define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 #define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) #define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 #define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) #define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 #define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) #define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 #define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) #define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 #define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) #define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) #define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) #define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB #define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) #define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 #define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) /* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with * word boundary layout issues, which the Linux compilers silently deal * with by adding padding, making the actual struct larger than designed. * However, the FW compiler for the NIC is less lenient and complains * about the struct. Hence, the struct defined here has an extra byte in * fields reserved3 and reserved4 to directly acknowledge that padding, * and the new length is used in the length check macro. */ struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved1; u8 oper_num_tc; u8 oper_prio_tc[4]; u8 reserved2; u8 oper_tc_bw[8]; u8 oper_pfc_en; u8 reserved3[2]; __le16 oper_app_prio; u8 reserved4[2]; __le16 tlv_status; }; I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp); struct i40e_aqc_get_cee_dcb_cfg_resp { u8 oper_num_tc; u8 oper_prio_tc[4]; u8 oper_tc_bw[8]; u8 oper_pfc_en; __le16 oper_app_prio; __le32 tlv_status; u8 reserved[12]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); /* Set Local LLDP MIB (indirect 0x0A08) * Used to replace the local MIB of a given LLDP agent. e.g. 
DCBx */ struct i40e_aqc_lldp_set_local_mib { #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 #define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; __le16 length; u8 reserved1[4]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); struct i40e_aqc_lldp_set_local_mib_resp { #define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01 u8 status; u8 reserved[15]; }; I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp); /* Stop/Start LLDP Agent (direct 0x0A09) * Used for stopping/starting specific LLDP agent. e.g. DCBx */ struct i40e_aqc_lldp_stop_start_specific_agent { #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); /* Restore LLDP Agent factory settings (direct 0x0A0A) */ struct i40e_aqc_lldp_restore { u8 command; #define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0 #define I40E_AQ_LLDP_AGENT_RESTORE 0x1 u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore); /* Add Udp Tunnel command and completion (direct 0x0B00) */ struct i40e_aqc_add_udp_tunnel { __le16 udp_port; u8 reserved0[3]; u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 #define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); struct i40e_aqc_add_udp_tunnel_completion { __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; #define I40E_AQC_SINGLE_PF 0x0 #define I40E_AQC_MULTIPLE_PFS 0x1 u8 total_filters; u8 reserved[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); /* remove UDP Tunnel command (0x0B01) */ struct i40e_aqc_remove_udp_tunnel { u8 reserved[2]; u8 index; /* 0 to 15 */ u8 reserved2[13]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); struct i40e_aqc_del_udp_tunnel_completion { __le16 udp_port; u8 index; /* 0 to 15 */ u8 multiple_pfs; u8 total_filters_used; u8 reserved1[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); struct i40e_aqc_get_set_rss_key { #define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) __le16 vsi_id; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); struct i40e_aqc_get_set_rss_key_data { u8 standard_rss_key[0x28]; u8 extended_hash_key[0xc]; }; I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); struct i40e_aqc_get_set_rss_lut { #define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 __le16 flags; u8 reserved[4]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); /* tunnel key structure 0x0B10 */ struct 
i40e_aqc_tunnel_key_structure { u8 key1_off; u8 key2_off; u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 /* response flags */ #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 #define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 #define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { __le32 param_type; #define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 #define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 #define I40E_AQ_OEM_PARAM_MAC 2 __le32 param_value1; __le16 param_value2; u8 reserved[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { __le32 state; #define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 #define I40E_AQ_OEM_STATE_LINK_UP 0x1 u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); /* Initialize OCSD (0xFE02, direct) */ struct i40e_aqc_opc_oem_ocsd_initialize { u8 type_status; u8 reserved1[3]; __le32 ocsd_memory_block_addr_high; __le32 ocsd_memory_block_addr_low; __le32 requested_update_interval; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); /* Initialize OCBB (0xFE03, direct) */ struct i40e_aqc_opc_oem_ocbb_initialize { u8 type_status; u8 reserved1[3]; __le32 ocbb_memory_block_addr_high; __le32 ocbb_memory_block_addr_low; u8 reserved2[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); /* debug commands */ /* get device id (0xFF00) uses the generic structure */ /* set test mode (0xFF01, internal) */ struct i40e_acq_set_test_mode { u8 mode; #define I40E_AQ_TEST_PARTIAL 0 #define I40E_AQ_TEST_FULL 1 #define I40E_AQ_TEST_NVM 2 u8 reserved[3]; u8 command; #define I40E_AQ_TEST_OPEN 0 #define I40E_AQ_TEST_CLOSE 1 #define I40E_AQ_TEST_INC 2 u8 reserved2[3]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); /* Debug Read Register command (0xFF03) * Debug Write Register command (0xFF04) */ struct i40e_aqc_debug_reg_read_write { __le32 reserved; __le32 address; __le32 value_high; __le32 value_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); /* Scatter/gather Reg Read (indirect 0xFF05) * Scatter/gather Reg Write (indirect 0xFF06) */ /* i40e_aq_desc is used for the command */ struct i40e_aqc_debug_reg_sg_element_data { __le32 address; __le32 value; }; /* Debug Modify register (direct 0xFF07) */ struct i40e_aqc_debug_modify_reg { __le32 address; __le32 value; __le32 clear_mask; __le32 set_mask; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); /* dump internal data (0xFF08, indirect) */ #define I40E_AQ_CLUSTER_ID_AUX 0 #define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 #define I40E_AQ_CLUSTER_ID_TXSCHED 2 #define I40E_AQ_CLUSTER_ID_HMC 3 #define I40E_AQ_CLUSTER_ID_MAC0 4 #define I40E_AQ_CLUSTER_ID_MAC1 5 #define I40E_AQ_CLUSTER_ID_MAC2 6 #define I40E_AQ_CLUSTER_ID_MAC3 7 #define I40E_AQ_CLUSTER_ID_DCB 8 #define I40E_AQ_CLUSTER_ID_EMP_MEM 9 #define I40E_AQ_CLUSTER_ID_PKT_BUF 10 #define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { u8 cluster_id; u8 table_id; __le16 data_size; __le32 idx; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); struct i40e_aqc_debug_modify_internals { u8 cluster_id; u8 cluster_specific_params[7];
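The clear_mask/set_mask pair in i40e_aqc_debug_modify_reg above is a read-modify-write encoding. Assuming the conventional clear-then-set order (the header itself does not spell it out), the register update reduces to one expression, sketched here:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* Assumed semantics for the 0xFF07 clear_mask/set_mask pair: bits in
 * clear_mask are zeroed first, then bits in set_mask are ORed in. */
static u32 modify_reg(u32 old_val, u32 clear_mask, u32 set_mask)
{
	return (old_val & ~clear_mask) | set_mask;
}

int main(void)
{
	/* Change a 2-bit field at bits 4-5 to 0x1 without disturbing
	 * the rest of the register. */
	u32 reg = 0xdeadbeef;

	reg = modify_reg(reg, 0x3u << 4, 0x1u << 4);
	printf("reg = 0x%08x\n", reg);	/* prints 0xdeadbedf */
	return 0;
}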
__le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); #endif /* _I40E_ADMINQ_CMD_H_ */ Index: head/sys/dev/ixl/i40e_common.c =================================================================== --- head/sys/dev/ixl/i40e_common.c (revision 365230) +++ head/sys/dev/ixl/i40e_common.c (revision 365231) @@ -1,7371 +1,7696 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" #include "virtchnl.h" /** * i40e_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. 
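For context on the function that follows: it is a plain switch on the PCI device ID, and the device IDs added by this revision all select the XL710 MAC type. A minimal sketch of the same dispatch pattern; the numeric ID values below are assumptions for illustration, since the real defines live in i40e_devids.h, which this diff does not include:

#include <stdio.h>
#include <stdint.h>

typedef uint16_t u16;

/* Assumed values; the authoritative defines are in i40e_devids.h */
#define I40E_DEV_ID_10G_BASE_T_BC	0x15FF	/* new in this revision */
#define I40E_DEV_ID_5G_BASE_T_BC	0x101F	/* new in this revision */
#define I40E_DEV_ID_SFP_X722		0x37D0

enum mac_type { MAC_XL710, MAC_X722, MAC_GENERIC };

/* Same shape as the switch in i40e_set_mac_type: group device IDs by
 * the MAC family they belong to, with a generic fallback. */
static enum mac_type classify(u16 device_id)
{
	switch (device_id) {
	case I40E_DEV_ID_10G_BASE_T_BC:
	case I40E_DEV_ID_5G_BASE_T_BC:
		return MAC_XL710;
	case I40E_DEV_ID_SFP_X722:
		return MAC_X722;
	default:
		return MAC_GENERIC;
	}
}

int main(void)
{
	printf("%d\n", classify(I40E_DEV_ID_5G_BASE_T_BC)); /* 0 = MAC_XL710 */
	return 0;
}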
**/ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) { enum i40e_status_code status = I40E_SUCCESS; DEBUGFUNC("i40e_set_mac_type\n"); if (hw->vendor_id == I40E_INTEL_VENDOR_ID) { switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: + case I40E_DEV_ID_10G_B: + case I40E_DEV_ID_10G_SFP: + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_20G_KR2: case I40E_DEV_ID_20G_KR2_A: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: case I40E_DEV_ID_X710_N3000: case I40E_DEV_ID_XXV710_N3000: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_KX_X722: case I40E_DEV_ID_QSFP_X722: case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: hw->mac.type = I40E_MAC_X722_VF; break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: case I40E_DEV_ID_ADAPTIVE_VF: hw->mac.type = I40E_MAC_VF; break; default: hw->mac.type = I40E_MAC_GENERIC; break; } } else { status = I40E_ERR_DEVICE_NOT_SUPPORTED; } DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n", hw->mac.type, status); return status; } /** * i40e_aq_str - convert AQ err code to a string * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) { switch (aq_err) { case I40E_AQ_RC_OK: return "OK"; case I40E_AQ_RC_EPERM: return "I40E_AQ_RC_EPERM"; case I40E_AQ_RC_ENOENT: return "I40E_AQ_RC_ENOENT"; case I40E_AQ_RC_ESRCH: return "I40E_AQ_RC_ESRCH"; case I40E_AQ_RC_EINTR: return "I40E_AQ_RC_EINTR"; case I40E_AQ_RC_EIO: return "I40E_AQ_RC_EIO"; case I40E_AQ_RC_ENXIO: return "I40E_AQ_RC_ENXIO"; case I40E_AQ_RC_E2BIG: return "I40E_AQ_RC_E2BIG"; case I40E_AQ_RC_EAGAIN: return "I40E_AQ_RC_EAGAIN"; case I40E_AQ_RC_ENOMEM: return "I40E_AQ_RC_ENOMEM"; case I40E_AQ_RC_EACCES: return "I40E_AQ_RC_EACCES"; case I40E_AQ_RC_EFAULT: return "I40E_AQ_RC_EFAULT"; case I40E_AQ_RC_EBUSY: return "I40E_AQ_RC_EBUSY"; case I40E_AQ_RC_EEXIST: return "I40E_AQ_RC_EEXIST"; case I40E_AQ_RC_EINVAL: return "I40E_AQ_RC_EINVAL"; case I40E_AQ_RC_ENOTTY: return "I40E_AQ_RC_ENOTTY"; case I40E_AQ_RC_ENOSPC: return "I40E_AQ_RC_ENOSPC"; case I40E_AQ_RC_ENOSYS: return "I40E_AQ_RC_ENOSYS"; case I40E_AQ_RC_ERANGE: return "I40E_AQ_RC_ERANGE"; case I40E_AQ_RC_EFLUSHED: return "I40E_AQ_RC_EFLUSHED"; case I40E_AQ_RC_BAD_ADDR: return "I40E_AQ_RC_BAD_ADDR"; case I40E_AQ_RC_EMODE: return "I40E_AQ_RC_EMODE"; case I40E_AQ_RC_EFBIG: return "I40E_AQ_RC_EFBIG"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); return hw->err_str; } /** * i40e_stat_str - convert status err code to a string * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err) { switch (stat_err) { case I40E_SUCCESS: return "OK"; case I40E_ERR_NVM: return "I40E_ERR_NVM"; case I40E_ERR_NVM_CHECKSUM: return "I40E_ERR_NVM_CHECKSUM"; case I40E_ERR_PHY: return "I40E_ERR_PHY"; case I40E_ERR_CONFIG: return "I40E_ERR_CONFIG"; case I40E_ERR_PARAM: return "I40E_ERR_PARAM"; case I40E_ERR_MAC_TYPE: return "I40E_ERR_MAC_TYPE"; case I40E_ERR_UNKNOWN_PHY: return "I40E_ERR_UNKNOWN_PHY"; case I40E_ERR_LINK_SETUP: return "I40E_ERR_LINK_SETUP"; case I40E_ERR_ADAPTER_STOPPED: return 
"I40E_ERR_ADAPTER_STOPPED"; case I40E_ERR_INVALID_MAC_ADDR: return "I40E_ERR_INVALID_MAC_ADDR"; case I40E_ERR_DEVICE_NOT_SUPPORTED: return "I40E_ERR_DEVICE_NOT_SUPPORTED"; case I40E_ERR_MASTER_REQUESTS_PENDING: return "I40E_ERR_MASTER_REQUESTS_PENDING"; case I40E_ERR_INVALID_LINK_SETTINGS: return "I40E_ERR_INVALID_LINK_SETTINGS"; case I40E_ERR_AUTONEG_NOT_COMPLETE: return "I40E_ERR_AUTONEG_NOT_COMPLETE"; case I40E_ERR_RESET_FAILED: return "I40E_ERR_RESET_FAILED"; case I40E_ERR_SWFW_SYNC: return "I40E_ERR_SWFW_SYNC"; case I40E_ERR_NO_AVAILABLE_VSI: return "I40E_ERR_NO_AVAILABLE_VSI"; case I40E_ERR_NO_MEMORY: return "I40E_ERR_NO_MEMORY"; case I40E_ERR_BAD_PTR: return "I40E_ERR_BAD_PTR"; case I40E_ERR_RING_FULL: return "I40E_ERR_RING_FULL"; case I40E_ERR_INVALID_PD_ID: return "I40E_ERR_INVALID_PD_ID"; case I40E_ERR_INVALID_QP_ID: return "I40E_ERR_INVALID_QP_ID"; case I40E_ERR_INVALID_CQ_ID: return "I40E_ERR_INVALID_CQ_ID"; case I40E_ERR_INVALID_CEQ_ID: return "I40E_ERR_INVALID_CEQ_ID"; case I40E_ERR_INVALID_AEQ_ID: return "I40E_ERR_INVALID_AEQ_ID"; case I40E_ERR_INVALID_SIZE: return "I40E_ERR_INVALID_SIZE"; case I40E_ERR_INVALID_ARP_INDEX: return "I40E_ERR_INVALID_ARP_INDEX"; case I40E_ERR_INVALID_FPM_FUNC_ID: return "I40E_ERR_INVALID_FPM_FUNC_ID"; case I40E_ERR_QP_INVALID_MSG_SIZE: return "I40E_ERR_QP_INVALID_MSG_SIZE"; case I40E_ERR_QP_TOOMANY_WRS_POSTED: return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; case I40E_ERR_INVALID_FRAG_COUNT: return "I40E_ERR_INVALID_FRAG_COUNT"; case I40E_ERR_QUEUE_EMPTY: return "I40E_ERR_QUEUE_EMPTY"; case I40E_ERR_INVALID_ALIGNMENT: return "I40E_ERR_INVALID_ALIGNMENT"; case I40E_ERR_FLUSHED_QUEUE: return "I40E_ERR_FLUSHED_QUEUE"; case I40E_ERR_INVALID_PUSH_PAGE_INDEX: return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; case I40E_ERR_INVALID_IMM_DATA_SIZE: return "I40E_ERR_INVALID_IMM_DATA_SIZE"; case I40E_ERR_TIMEOUT: return "I40E_ERR_TIMEOUT"; case I40E_ERR_OPCODE_MISMATCH: return "I40E_ERR_OPCODE_MISMATCH"; case I40E_ERR_CQP_COMPL_ERROR: return "I40E_ERR_CQP_COMPL_ERROR"; case I40E_ERR_INVALID_VF_ID: return "I40E_ERR_INVALID_VF_ID"; case I40E_ERR_INVALID_HMCFN_ID: return "I40E_ERR_INVALID_HMCFN_ID"; case I40E_ERR_BACKING_PAGE_ERROR: return "I40E_ERR_BACKING_PAGE_ERROR"; case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; case I40E_ERR_INVALID_PBLE_INDEX: return "I40E_ERR_INVALID_PBLE_INDEX"; case I40E_ERR_INVALID_SD_INDEX: return "I40E_ERR_INVALID_SD_INDEX"; case I40E_ERR_INVALID_PAGE_DESC_INDEX: return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; case I40E_ERR_INVALID_SD_TYPE: return "I40E_ERR_INVALID_SD_TYPE"; case I40E_ERR_MEMCPY_FAILED: return "I40E_ERR_MEMCPY_FAILED"; case I40E_ERR_INVALID_HMC_OBJ_INDEX: return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; case I40E_ERR_INVALID_HMC_OBJ_COUNT: return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; case I40E_ERR_INVALID_SRQ_ARM_LIMIT: return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; case I40E_ERR_SRQ_ENABLED: return "I40E_ERR_SRQ_ENABLED"; case I40E_ERR_ADMIN_QUEUE_ERROR: return "I40E_ERR_ADMIN_QUEUE_ERROR"; case I40E_ERR_ADMIN_QUEUE_TIMEOUT: return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; case I40E_ERR_BUF_TOO_SHORT: return "I40E_ERR_BUF_TOO_SHORT"; case I40E_ERR_ADMIN_QUEUE_FULL: return "I40E_ERR_ADMIN_QUEUE_FULL"; case I40E_ERR_ADMIN_QUEUE_NO_WORK: return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; case I40E_ERR_BAD_IWARP_CQE: return "I40E_ERR_BAD_IWARP_CQE"; case I40E_ERR_NVM_BLANK_MODE: return "I40E_ERR_NVM_BLANK_MODE"; case I40E_ERR_NOT_IMPLEMENTED: return "I40E_ERR_NOT_IMPLEMENTED"; case I40E_ERR_PE_DOORBELL_NOT_ENABLED: return 
"I40E_ERR_PE_DOORBELL_NOT_ENABLED"; case I40E_ERR_DIAG_TEST_FAILED: return "I40E_ERR_DIAG_TEST_FAILED"; case I40E_ERR_NOT_READY: return "I40E_ERR_NOT_READY"; case I40E_NOT_SUPPORTED: return "I40E_NOT_SUPPORTED"; case I40E_ERR_FIRMWARE_API_VERSION: return "I40E_ERR_FIRMWARE_API_VERSION"; case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); return hw->err_str; } /** * i40e_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer * @buf_len: max length of buffer * * Dumps debug log about adminq command with descriptor contents. **/ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u32 effective_mask = hw->debug_mask & mask; u8 *buf = (u8 *)buffer; u16 len; u16 i; if (!effective_mask || !desc) return; len = LE16_TO_CPU(aq_desc->datalen); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", LE16_TO_CPU(aq_desc->opcode), LE16_TO_CPU(aq_desc->flags), LE16_TO_CPU(aq_desc->datalen), LE16_TO_CPU(aq_desc->retval)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\tcookie (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->cookie_high), LE32_TO_CPU(aq_desc->cookie_low)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\tparam (0,1) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->params.internal.param0), LE32_TO_CPU(aq_desc->params.internal.param1)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\taddr (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->params.external.addr_high), LE32_TO_CPU(aq_desc->params.external.addr_low)); if (buffer && (buf_len != 0) && (len != 0) && (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) i40e_debug(hw, mask, "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", i, buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7], buf[i+8], buf[i+9], buf[i+10], buf[i+11], buf[i+12], buf[i+13], buf[i+14], buf[i+15]); /* the most we could have left is 16 bytes, pad with zeros */ if (i < len) { char d_buf[16]; int j, i_sav; i_sav = i; memset(d_buf, 0, sizeof(d_buf)); for (j = 0; i < len; j++, i++) d_buf[j] = buf[i]; i40e_debug(hw, mask, "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3], d_buf[4], d_buf[5], d_buf[6], d_buf[7], d_buf[8], d_buf[9], d_buf[10], d_buf[11], d_buf[12], d_buf[13], d_buf[14], d_buf[15]); } } } /** * i40e_check_asq_alive * @hw: pointer to the hw struct * * Returns TRUE if Queue is enabled else FALSE. **/ bool i40e_check_asq_alive(struct i40e_hw *hw) { if (hw->aq.asq.len) { if (!i40e_is_vf(hw)) return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); else return !!(rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQENABLE_MASK); } return FALSE; } /** * i40e_aq_queue_shutdown * @hw: pointer to the hw struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. 
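The buffer dump in i40e_debug_aq above uses a two-phase loop: whole 16-byte rows first, then one zero-padded row for the tail. A standalone sketch of the same structure; note that len - 16 is computed in int arithmetic after integer promotion, so a buffer shorter than 16 bytes skips the first loop rather than wrapping around:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef uint8_t u8;
typedef uint16_t u16;

static void print_row(u16 off, const u8 *row)
{
	int j;

	printf("\t0x%04X:", off);
	for (j = 0; j < 16; j++)
		printf(" %02X", row[j]);
	printf("\n");
}

/* Standalone model of the dump loop in i40e_debug_aq: whole 16-byte
 * rows first, then one zero-padded row for whatever is left. */
static void dump_buf(const u8 *buf, u16 len)
{
	u16 i;

	for (i = 0; i < (len - 16); i += 16)	/* int arithmetic, safe for len < 16 */
		print_row(i, &buf[i]);

	if (i < len) {				/* at most 16 bytes remain */
		u8 d_buf[16];
		u16 start = i;
		int j;

		memset(d_buf, 0, sizeof(d_buf));
		for (j = 0; i < len; j++, i++)
			d_buf[j] = buf[i];
		print_row(start, d_buf);
	}
}

int main(void)
{
	u8 data[20];
	u16 k;

	for (k = 0; k < sizeof(data); k++)
		data[k] = (u8)k;
	dump_buf(data, sizeof(data));	/* one full row plus a 4-byte padded tail */
	return 0;
}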
**/ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading) { struct i40e_aq_desc desc; struct i40e_aqc_queue_shutdown *cmd = (struct i40e_aqc_queue_shutdown *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); if (unloading) cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_get_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set TRUE, for VSI table set FALSE * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * @set: set TRUE to set the table, FALSE to get the table * * Internal function to get or set RSS look up table **/ static enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size, bool set) { enum i40e_status_code status; struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_lut *cmd_resp = (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; if (set) i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_rss_lut); else i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_rss_lut); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); cmd_resp->vsi_id = CPU_TO_LE16((u16)((vsi_id << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); if (pf_lut) cmd_resp->flags |= CPU_TO_LE16((u16) ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); else cmd_resp->flags |= CPU_TO_LE16((u16) ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); return status; } /** * i40e_aq_get_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set TRUE, for VSI table set FALSE * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * * get the RSS lookup table, PF or VSI type **/ enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, FALSE); } /** * i40e_aq_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set TRUE, for VSI table set FALSE * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * * set the RSS lookup table, PF or VSI type **/ enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, TRUE); } /** * i40e_aq_get_set_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * @set: set TRUE to set the key, FALSE to get the key * * get the RSS key per VSI **/ static enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key, bool set) { enum i40e_status_code status; struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_key *cmd_resp = (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); if (set) i40e_fill_default_direct_cmd_desc(&desc, 
i40e_aqc_opc_set_rss_key); else i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_rss_key); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); cmd_resp->vsi_id = CPU_TO_LE16((u16)((vsi_id << I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); return status; } /** * i40e_aq_get_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * **/ enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, FALSE); } /** * i40e_aq_set_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * * set the RSS key per VSI **/ enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, TRUE); } /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. * * Macros are used to shorten the table lines and make this table human * readable. * * We store the PTYPE in the top byte of the bit field - this is just so that * we can check that the table doesn't have a row missing, as the index into * the table should be the PTYPE. * * Typical work flow: * * IF NOT i40e_ptype_lookup[ptype].known * THEN * Packet is unknown * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP * Use the rest of the fields to look at the tunnels, inner protocols, etc * ELSE * Use the enum i40e_rx_l2_ptype to decode the packet type * ENDIF */ /* macro to make the table lines short */ #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ { PTYPE, \ 1, \ I40E_RX_PTYPE_OUTER_##OUTER_IP, \ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ I40E_RX_PTYPE_##OUTER_FRAG, \ I40E_RX_PTYPE_TUNNEL_##T, \ I40E_RX_PTYPE_TUNNEL_END_##TE, \ I40E_RX_PTYPE_##TEF, \ I40E_RX_PTYPE_INNER_PROT_##I, \ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } #define I40E_PTT_UNUSED_ENTRY(PTYPE) \ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* shorter macros makes the table fit but are terse */ #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC /* Lookup table mapping the HW PTYPE to the bit field for decoding */ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { /* L2 Packet types */ I40E_PTT_UNUSED_ENTRY(0), I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(4), I40E_PTT_UNUSED_ENTRY(5), I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(8), I40E_PTT_UNUSED_ENTRY(9), I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(17, L2, NONE, NOF, 
NONE, NONE, NOF, NONE, PAY3), I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), /* Non Tunneled IPv4 */ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(25), I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv4 --> IPv4 */ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(32), I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv4 --> IPv6 */ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(39), I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT */ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> IPv4 */ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(47), I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> IPv6 */ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(54), I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC */ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(62), I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(69), I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC/VLAN */ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* 
IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(77), I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(84), I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* Non Tunneled IPv6 */ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(91), I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv6 --> IPv4 */ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(98), I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv6 --> IPv6 */ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(105), I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT */ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> IPv4 */ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(113), I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> IPv6 */ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(120), I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC */ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(128), I40E_PTT(129, 
IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(135), I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN */ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(143), I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(150), I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* unused entries */ I40E_PTT_UNUSED_ENTRY(154), I40E_PTT_UNUSED_ENTRY(155), I40E_PTT_UNUSED_ENTRY(156), I40E_PTT_UNUSED_ENTRY(157), I40E_PTT_UNUSED_ENTRY(158), I40E_PTT_UNUSED_ENTRY(159), I40E_PTT_UNUSED_ENTRY(160), I40E_PTT_UNUSED_ENTRY(161), I40E_PTT_UNUSED_ENTRY(162), I40E_PTT_UNUSED_ENTRY(163), I40E_PTT_UNUSED_ENTRY(164), I40E_PTT_UNUSED_ENTRY(165), I40E_PTT_UNUSED_ENTRY(166), I40E_PTT_UNUSED_ENTRY(167), I40E_PTT_UNUSED_ENTRY(168), I40E_PTT_UNUSED_ENTRY(169), I40E_PTT_UNUSED_ENTRY(170), I40E_PTT_UNUSED_ENTRY(171), I40E_PTT_UNUSED_ENTRY(172), I40E_PTT_UNUSED_ENTRY(173), I40E_PTT_UNUSED_ENTRY(174), I40E_PTT_UNUSED_ENTRY(175), I40E_PTT_UNUSED_ENTRY(176), I40E_PTT_UNUSED_ENTRY(177), I40E_PTT_UNUSED_ENTRY(178), I40E_PTT_UNUSED_ENTRY(179), I40E_PTT_UNUSED_ENTRY(180), I40E_PTT_UNUSED_ENTRY(181), I40E_PTT_UNUSED_ENTRY(182), I40E_PTT_UNUSED_ENTRY(183), I40E_PTT_UNUSED_ENTRY(184), I40E_PTT_UNUSED_ENTRY(185), I40E_PTT_UNUSED_ENTRY(186), I40E_PTT_UNUSED_ENTRY(187), I40E_PTT_UNUSED_ENTRY(188), I40E_PTT_UNUSED_ENTRY(189), I40E_PTT_UNUSED_ENTRY(190), I40E_PTT_UNUSED_ENTRY(191), I40E_PTT_UNUSED_ENTRY(192), I40E_PTT_UNUSED_ENTRY(193), I40E_PTT_UNUSED_ENTRY(194), I40E_PTT_UNUSED_ENTRY(195), I40E_PTT_UNUSED_ENTRY(196), I40E_PTT_UNUSED_ENTRY(197), I40E_PTT_UNUSED_ENTRY(198), I40E_PTT_UNUSED_ENTRY(199), I40E_PTT_UNUSED_ENTRY(200), I40E_PTT_UNUSED_ENTRY(201), I40E_PTT_UNUSED_ENTRY(202), I40E_PTT_UNUSED_ENTRY(203), I40E_PTT_UNUSED_ENTRY(204), I40E_PTT_UNUSED_ENTRY(205), I40E_PTT_UNUSED_ENTRY(206), I40E_PTT_UNUSED_ENTRY(207), I40E_PTT_UNUSED_ENTRY(208), I40E_PTT_UNUSED_ENTRY(209), I40E_PTT_UNUSED_ENTRY(210), I40E_PTT_UNUSED_ENTRY(211), I40E_PTT_UNUSED_ENTRY(212), I40E_PTT_UNUSED_ENTRY(213), I40E_PTT_UNUSED_ENTRY(214), I40E_PTT_UNUSED_ENTRY(215), 
I40E_PTT_UNUSED_ENTRY(216), I40E_PTT_UNUSED_ENTRY(217), I40E_PTT_UNUSED_ENTRY(218), I40E_PTT_UNUSED_ENTRY(219), I40E_PTT_UNUSED_ENTRY(220), I40E_PTT_UNUSED_ENTRY(221), I40E_PTT_UNUSED_ENTRY(222), I40E_PTT_UNUSED_ENTRY(223), I40E_PTT_UNUSED_ENTRY(224), I40E_PTT_UNUSED_ENTRY(225), I40E_PTT_UNUSED_ENTRY(226), I40E_PTT_UNUSED_ENTRY(227), I40E_PTT_UNUSED_ENTRY(228), I40E_PTT_UNUSED_ENTRY(229), I40E_PTT_UNUSED_ENTRY(230), I40E_PTT_UNUSED_ENTRY(231), I40E_PTT_UNUSED_ENTRY(232), I40E_PTT_UNUSED_ENTRY(233), I40E_PTT_UNUSED_ENTRY(234), I40E_PTT_UNUSED_ENTRY(235), I40E_PTT_UNUSED_ENTRY(236), I40E_PTT_UNUSED_ENTRY(237), I40E_PTT_UNUSED_ENTRY(238), I40E_PTT_UNUSED_ENTRY(239), I40E_PTT_UNUSED_ENTRY(240), I40E_PTT_UNUSED_ENTRY(241), I40E_PTT_UNUSED_ENTRY(242), I40E_PTT_UNUSED_ENTRY(243), I40E_PTT_UNUSED_ENTRY(244), I40E_PTT_UNUSED_ENTRY(245), I40E_PTT_UNUSED_ENTRY(246), I40E_PTT_UNUSED_ENTRY(247), I40E_PTT_UNUSED_ENTRY(248), I40E_PTT_UNUSED_ENTRY(249), I40E_PTT_UNUSED_ENTRY(250), I40E_PTT_UNUSED_ENTRY(251), I40E_PTT_UNUSED_ENTRY(252), I40E_PTT_UNUSED_ENTRY(253), I40E_PTT_UNUSED_ENTRY(254), I40E_PTT_UNUSED_ENTRY(255) }; /** * i40e_validate_mac_addr - Validate unicast MAC address * @mac_addr: pointer to MAC address * * Tests a MAC address to ensure it is a valid Individual Address **/ enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr) { enum i40e_status_code status = I40E_SUCCESS; DEBUGFUNC("i40e_validate_mac_addr"); /* Broadcast addresses ARE multicast addresses * Make sure it is not a multicast address * Reject the zero address */ if (I40E_IS_MULTICAST(mac_addr) || (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)) status = I40E_ERR_INVALID_MAC_ADDR; return status; } /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure * * This assigns the MAC type and PHY code and inits the NVM. * Does not touch the hardware. This function must be called prior to any * other function in the shared code. The i40e_hw structure should be * memset to 0 prior to calling this function. 
The following fields in * hw structure should be filled in prior to calling this function: * hw_addr, back, device_id, vendor_id, subsystem_device_id, * subsystem_vendor_id, and revision_id **/ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw) { enum i40e_status_code status = I40E_SUCCESS; u32 port, ari, func_rid; DEBUGFUNC("i40e_init_shared_code"); i40e_set_mac_type(hw); switch (hw->mac.type) { case I40E_MAC_XL710: case I40E_MAC_X722: break; default: return I40E_ERR_DEVICE_NOT_SUPPORTED; } hw->phy.get_link_info = TRUE; /* Determine port number and PF number*/ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; hw->port = (u8)port; ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; func_rid = rd32(hw, I40E_PF_FUNC_RID); if (ari) hw->pf_id = (u8)(func_rid & 0xff); else hw->pf_id = (u8)(func_rid & 0x7); /* NVMUpdate features structure initialization */ hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR; hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR; hw->nvmupd_features.size = sizeof(hw->nvmupd_features); i40e_memset(hw->nvmupd_features.features, 0x0, I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN * sizeof(*hw->nvmupd_features.features), I40E_NONDMA_MEM); /* No features supported at the moment */ hw->nvmupd_features.features[0] = 0; status = i40e_init_nvm(hw); return status; } /** * i40e_aq_mac_address_read - Retrieve the MAC addresses * @hw: pointer to the hw struct * @flags: a return indicator of what addresses were added to the addr store * @addrs: the requestor's mac addr store * @cmd_details: pointer to command details structure or NULL **/ static enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw, u16 *flags, struct i40e_aqc_mac_address_read_data *addrs, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_read *cmd_data = (struct i40e_aqc_mac_address_read *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, addrs, sizeof(*addrs), cmd_details); *flags = LE16_TO_CPU(cmd_data->command_flags); return status; } /** * i40e_aq_mac_address_write - Change the MAC addresses * @hw: pointer to the hw struct * @flags: indicates which MAC to be written * @mac_addr: address to write * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, u16 flags, u8 *mac_addr, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_write *cmd_data = (struct i40e_aqc_mac_address_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_write); cmd_data->command_flags = CPU_TO_LE16(flags); cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]); cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) | ((u32)mac_addr[3] << 16) | ((u32)mac_addr[4] << 8) | mac_addr[5]); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_get_mac_addr - get MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to MAC address * * Reads the adapter's MAC address from register **/ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; enum i40e_status_code status; 
u16 flags = 0; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (flags & I40E_AQC_LAN_ADDR_VALID) i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac), I40E_NONDMA_TO_NONDMA); return status; } /** * i40e_get_port_mac_addr - get Port MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to Port MAC address * * Reads the adapter's Port MAC address **/ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; enum i40e_status_code status; u16 flags = 0; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (status) return status; if (flags & I40E_AQC_PORT_ADDR_VALID) i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac), I40E_NONDMA_TO_NONDMA); else status = I40E_ERR_INVALID_MAC_ADDR; return status; } /** * i40e_pre_tx_queue_cfg - pre tx queue configure * @hw: pointer to the HW structure * @queue: target pf queue index * @enable: state change request * * Handles hw requirement to indicate intention to enable * or disable target queue. **/ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) { u32 abs_queue_idx = hw->func_caps.base_queue + queue; u32 reg_block = 0; u32 reg_val; if (abs_queue_idx >= 128) { reg_block = abs_queue_idx / 128; abs_queue_idx %= 128; } reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); if (enable) reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; else reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); } /** * i40e_read_pba_string - Reads part number string from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. 
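 *
 * Caller sizing sketch (illustrative only; the buffer name and length are
 * assumptions, not part of this file). The output needs two characters per
 * PBA data word plus a terminating NUL, so pba_num_size must be at least
 * (pba_size * 2) + 1:
 *
 *	u8 pba[64];
 *	enum i40e_status_code status;
 *
 *	status = i40e_read_pba_string(hw, pba, sizeof(pba));
 *	if (status == I40E_SUCCESS)
 *		DEBUGOUT1("PBA: %s\n", pba);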
**/ enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size) { enum i40e_status_code status = I40E_SUCCESS; u16 pba_word = 0; u16 pba_size = 0; u16 pba_ptr = 0; u16 i = 0; status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) { DEBUGOUT("Failed to read PBA flags or flag is invalid.\n"); return status; } status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); if (status != I40E_SUCCESS) { DEBUGOUT("Failed to read PBA Block pointer.\n"); return status; } status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); if (status != I40E_SUCCESS) { DEBUGOUT("Failed to read PBA Block size.\n"); return status; } /* Subtract one to get PBA word count (PBA Size word is included in * total size) */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { DEBUGOUT("Buffer too small for PBA data.\n"); return I40E_ERR_PARAM; } for (i = 0; i < pba_size; i++) { status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); if (status != I40E_SUCCESS) { DEBUGOUT1("Failed to read PBA Block word %d.\n", i); return status; } pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; pba_num[(i * 2) + 1] = pba_word & 0xFF; } pba_num[(pba_size * 2)] = '\0'; return status; } /** * i40e_get_media_type - Gets media type * @hw: pointer to the hardware structure **/ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) { enum i40e_media_type media; switch (hw->phy.link_info.phy_type) { case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: case I40E_PHY_TYPE_40GBASE_SR4: case I40E_PHY_TYPE_40GBASE_LR4: case I40E_PHY_TYPE_25GBASE_LR: case I40E_PHY_TYPE_25GBASE_SR: media = I40E_MEDIA_TYPE_FIBER; break; case I40E_PHY_TYPE_100BASE_TX: case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_2_5GBASE_T: + case I40E_PHY_TYPE_5GBASE_T: case I40E_PHY_TYPE_10GBASE_T: media = I40E_MEDIA_TYPE_BASET; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_40GBASE_CR4_CU: case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_40GBASE_AOC: case I40E_PHY_TYPE_10GBASE_AOC: case I40E_PHY_TYPE_25GBASE_CR: case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: media = I40E_MEDIA_TYPE_DA; break; case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_40GBASE_KR4: case I40E_PHY_TYPE_20GBASE_KR2: case I40E_PHY_TYPE_25GBASE_KR: media = I40E_MEDIA_TYPE_BACKPLANE; break; case I40E_PHY_TYPE_SGMII: case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_XLAUI: case I40E_PHY_TYPE_XLPPI: default: media = I40E_MEDIA_TYPE_UNKNOWN; break; } return media; } /** * i40e_poll_globr - Poll for Global Reset completion * @hw: pointer to the hardware structure * @retry_limit: how many times to retry before failure **/ static enum i40e_status_code i40e_poll_globr(struct i40e_hw *hw, u32 retry_limit) { u32 cnt, reg = 0; for (cnt = 0; cnt < retry_limit; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) return I40E_SUCCESS; i40e_msec_delay(100); } DEBUGOUT("Global reset failed.\n"); DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg); return I40E_ERR_RESET_FAILED; } #define I40E_PF_RESET_WAIT_COUNT 200 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure * * Assuming someone else has triggered a global reset, * assure the global reset is complete and then reset the PF **/ enum i40e_status_code 
i40e_pf_reset(struct i40e_hw *hw) { u32 cnt = 0; u32 cnt1 = 0; u32 reg = 0; u32 grst_del; /* Poll for Global Reset steady state in case of recent GRST. * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. */ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; grst_del = min(grst_del * 20, 160U); for (cnt = 0; cnt < grst_del; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; i40e_msec_delay(100); } if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { DEBUGOUT("Global reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } /* Now wait for the FW to be ready */ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { reg = rd32(hw, I40E_GLNVM_ULD); reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { DEBUGOUT1("Core and Global modules ready %d\n", cnt1); break; } i40e_msec_delay(10); } if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { DEBUGOUT("wait for FW Reset complete timed out\n"); DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg); return I40E_ERR_RESET_FAILED; } /* If there was a Global Reset in progress when we got here, * we don't need to do the PF Reset */ if (!cnt) { u32 reg2 = 0; reg = rd32(hw, I40E_PFGEN_CTRL); wr32(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, I40E_PFGEN_CTRL); if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; reg2 = rd32(hw, I40E_GLGEN_RSTAT); if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) break; i40e_msec_delay(1); } if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS) return I40E_ERR_RESET_FAILED; } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { DEBUGOUT("PF reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } } i40e_clear_pxe_mode(hw); return I40E_SUCCESS; } /** * i40e_clear_hw - clear out any left over hw state * @hw: pointer to the hw struct * * Clear queues and interrupts, typically called at init time, * but after the capabilities have been found so we know how many * queues and msix vectors have been allocated. 
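 *
 * Hedged bring-up sketch (an assumed typical flow, not mandated by this
 * file): discover capabilities, quiesce the function, then reset it:
 *
 *	i40e_clear_hw(hw);
 *	if (i40e_pf_reset(hw) != I40E_SUCCESS)
 *		return;	// hypothetical error handling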
**/ void i40e_clear_hw(struct i40e_hw *hw) { u32 num_queues, base_queue; u32 num_pf_int; u32 num_vf_int; u32 num_vfs; u32 i, j; u32 val; u32 eol = 0x7ff; /* get number of interrupts, queues, and vfs */ val = rd32(hw, I40E_GLPCI_CNF2); num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; val = rd32(hw, I40E_PFLAN_QALLOC); base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> I40E_PFLAN_QALLOC_LASTQ_SHIFT; if (val & I40E_PFLAN_QALLOC_VALID_MASK) num_queues = (j - base_queue) + 1; else num_queues = 0; val = rd32(hw, I40E_PF_VT_PFALLOC); i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> I40E_PF_VT_PFALLOC_LASTVF_SHIFT; if (val & I40E_PF_VT_PFALLOC_VALID_MASK) num_vfs = (j - i) + 1; else num_vfs = 0; /* stop all the interrupts */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; for (i = 0; i < num_pf_int - 2; i++) wr32(hw, I40E_PFINT_DYN_CTLN(i), val); /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); for (i = 0; i < num_pf_int - 2; i++) wr32(hw, I40E_PFINT_LNKLSTN(i), val); val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; for (i = 0; i < num_vfs; i++) wr32(hw, I40E_VPINT_LNKLST0(i), val); for (i = 0; i < num_vf_int - 2; i++) wr32(hw, I40E_VPINT_LNKLSTN(i), val); /* warn the HW of the coming Tx disables */ for (i = 0; i < num_queues; i++) { u32 abs_queue_idx = base_queue + i; u32 reg_block = 0; if (abs_queue_idx >= 128) { reg_block = abs_queue_idx / 128; abs_queue_idx %= 128; } val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); } i40e_usec_delay(400); /* stop all the queues */ for (i = 0; i < num_queues; i++) { wr32(hw, I40E_QINT_TQCTL(i), 0); wr32(hw, I40E_QTX_ENA(i), 0); wr32(hw, I40E_QINT_RQCTL(i), 0); wr32(hw, I40E_QRX_ENA(i), 0); } /* short wait for all queue disables to settle */ i40e_usec_delay(50); } /** * i40e_clear_pxe_mode - clear pxe operations mode * @hw: pointer to the hw struct * * Make sure all PXE mode settings are cleared, including things * like descriptor fetch/write-back mode. 
**/ void i40e_clear_pxe_mode(struct i40e_hw *hw) { if (i40e_check_asq_alive(hw)) i40e_aq_clear_pxe_mode(hw, NULL); } /** * i40e_led_is_mine - helper to find matching led * @hw: pointer to the hw struct * @idx: index into GPIO registers * * returns: 0 if no match, otherwise the value of the GPIO_CTL register */ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) { u32 gpio_val = 0; u32 port; - if (!hw->func_caps.led[idx]) + if (!I40E_IS_X710TL_DEVICE(hw->device_id) && + !hw->func_caps.led[idx]) return 0; gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; /* if PRT_NUM_NA is 1 then this LED is not port specific, OR * if it is not our port then ignore */ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || (port != hw->port)) return 0; return gpio_val; } #define I40E_COMBINED_ACTIVITY 0xA #define I40E_FILTER_ACTIVITY 0xE #define I40E_LINK_ACTIVITY 0xC #define I40E_MAC_ACTIVITY 0xD #define I40E_FW_LED BIT(4) #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) #define I40E_LED0 22 #define I40E_PIN_FUNC_SDP 0x0 #define I40E_PIN_FUNC_LED 0x1 /** * i40e_led_get - return current on/off mode * @hw: pointer to the hw struct * * The value returned is the 'mode' field as defined in the * GPIO register definitions: 0x0 = off, 0xf = on, and other * values are variations of possible behaviors relating to * blink, link, and wire. **/ u32 i40e_led_get(struct i40e_hw *hw) { u32 current_mode = 0; u32 mode = 0; int i; /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; /* ignore gpio LED src mode entries related to the activity * LEDs */ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); switch (current_mode) { case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: case I40E_MAC_ACTIVITY: case I40E_LINK_ACTIVITY: continue; default: break; } mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; } return mode; } /** * i40e_led_set - set new on/off mode * @hw: pointer to the hw struct * @mode: 0=off, 0xf=on (else see manual for mode details) * @blink: TRUE if the LED should blink when on, FALSE if steady * * if this function is used to turn on the blink it should * be used to disable the blink when restoring the original state. 
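 *
 * Identify-style sketch (illustrative only), saving and restoring the
 * original mode as recommended above:
 *
 *	u32 orig_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, TRUE);		// on, blinking
 *	// ... port identification happens here ...
 *	i40e_led_set(hw, orig_mode, FALSE);	// restore, blink off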
**/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { u32 current_mode = 0; int i; if (mode & ~I40E_LED_MODE_VALID) { DEBUGOUT1("invalid mode passed in %X\n", mode); return; } /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; /* ignore gpio LED src mode entries related to the activity * LEDs */ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); switch (current_mode) { case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: case I40E_MAC_ACTIVITY: case I40E_LINK_ACTIVITY: continue; default: break; } + if (I40E_IS_X710TL_DEVICE(hw->device_id)) { + u32 pin_func = 0; + + if (mode & I40E_FW_LED) + pin_func = I40E_PIN_FUNC_SDP; + else + pin_func = I40E_PIN_FUNC_LED; + + gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK; + gpio_val |= ((pin_func << + I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) & + I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK); + } gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); if (blink) gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); break; } } /* Admin command wrappers */ /** * i40e_aq_get_phy_capabilities * @hw: pointer to the hw struct * @abilities: structure for PHY capabilities to be filled * @qualified_modules: report Qualified Modules * @report_init: report init capabilities (active are default) * @cmd_details: pointer to command details structure or NULL * * Returns the various PHY abilities supported on the Port. 
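 *
 * Minimal call sketch (illustrative; assumes an initialized hw):
 *
 *	struct i40e_aq_get_phy_abilities_resp abilities;
 *	enum i40e_status_code status;
 *
 *	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE,
 *					      &abilities, NULL);
 *	if (status == I40E_SUCCESS)
 *		...	// abilities.phy_type and friends are now valid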
**/ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); if (!abilities) return I40E_ERR_PARAM; do { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_abilities); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (abilities_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); if (qualified_modules) desc.params.external.param0 |= CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); if (report_init) desc.params.external.param0 |= CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, cmd_details); switch (hw->aq.asq_last_status) { case I40E_AQ_RC_EIO: status = I40E_ERR_UNKNOWN_PHY; break; case I40E_AQ_RC_EAGAIN: i40e_msec_delay(1); total_delay++; status = I40E_ERR_TIMEOUT; break; /* also covers I40E_AQ_RC_OK */ default: break; } } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && (total_delay < max_delay)); if (status != I40E_SUCCESS) return status; if (report_init) { if (hw->mac.type == I40E_MAC_XL710 && hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL); } else { hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type); hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32); } } return status; } /** * i40e_aq_set_phy_config * @hw: pointer to the hw struct * @config: structure with PHY configuration to be set * @cmd_details: pointer to command details structure or NULL * * Set the various PHY configuration parameters * supported on the Port. One or more of the Set PHY config parameters may be * ignored in an MFP mode as the PF may not have the privilege to set some * of the PHY Config parameters. This status will be indicated by the * command response. **/ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_aq_set_phy_config *config, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_phy_config *cmd = (struct i40e_aq_set_phy_config *)&desc.params.raw; enum i40e_status_code status; if (!config) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_config); *cmd = *config; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_set_fc * @hw: pointer to the hw struct * @aq_failures: buffer to return AdminQ failure information * @atomic_restart: whether to enable atomic link restart * * Set the requested flow control mode using set_phy_config. 
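 *
 * Usage sketch (illustrative only):
 *
 *	u8 aq_failures = 0;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	status = i40e_set_fc(hw, &aq_failures, TRUE);
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
 *		...	// the Set PHY config step failed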
**/ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_restart) { enum i40e_fc_mode fc_mode = hw->fc.requested_mode; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; enum i40e_status_code status; u8 pause_mask = 0x0; *aq_failures = 0x0; switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; break; case I40E_FC_RX_PAUSE: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; break; case I40E_FC_TX_PAUSE: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; break; default: break; } /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities, NULL); if (status) { *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; return status; } memset(&config, 0, sizeof(config)); /* clear the old pause settings */ config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & ~(I40E_AQ_PHY_FLAG_PAUSE_RX); /* set the new abilities */ config.abilities |= pause_mask; /* If the abilities have changed, then set the new config */ if (config.abilities != abilities.abilities) { /* Auto restart link so settings take effect */ if (atomic_restart) config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* Copy over all the old settings */ config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; config.link_speed = abilities.link_speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_PHY_FEC_CONFIG_MASK; status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ status = i40e_update_link_info(hw); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ i40e_msec_delay(1000); status = i40e_update_link_info(hw); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; return status; } /** * i40e_aq_set_mac_config * @hw: pointer to the hw struct * @max_frame_size: Maximum Frame Size to be supported by the port * @crc_en: Tell HW to append a CRC to outgoing frames * @pacing: Pacing configurations * @auto_drop_blocking_packets: Tell HW to drop packets if TC queue is blocked * @cmd_details: pointer to command details structure or NULL * * Configure MAC settings for frame size, jumbo frame support and the * addition of a CRC by the hardware. 
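 *
 * Example invocation (illustrative; 9728 is just a common jumbo-frame
 * size, not a requirement of this API):
 *
 *	status = i40e_aq_set_mac_config(hw, 9728, TRUE, 0, FALSE, NULL);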
**/ enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size, bool crc_en, u16 pacing, bool auto_drop_blocking_packets, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_mac_config *cmd = (struct i40e_aq_set_mac_config *)&desc.params.raw; enum i40e_status_code status; if (max_frame_size == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_mac_config); cmd->max_frame_size = CPU_TO_LE16(max_frame_size); cmd->params = ((u8)pacing & 0x0F) << 3; if (crc_en) cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN; if (auto_drop_blocking_packets) { if (hw->flags & I40E_HW_FLAG_DROP_MODE) cmd->params |= I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN; else i40e_debug(hw, I40E_DEBUG_ALL, "This FW api version does not support drop mode.\n"); } #define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF cmd->fc_refresh_threshold = CPU_TO_LE16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_clear_pxe_mode * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Tell the firmware that the driver is taking over from PXE **/ enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { enum i40e_status_code status; struct i40e_aq_desc desc; struct i40e_aqc_clear_pxe *cmd = (struct i40e_aqc_clear_pxe *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_clear_pxe_mode); cmd->rx_cnt = 0x2; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); wr32(hw, I40E_GLLAN_RCTL_0, 0x1); return status; } /** * i40e_aq_set_link_restart_an * @hw: pointer to the hw struct * @enable_link: if TRUE: enable link, if FALSE: disable link * @cmd_details: pointer to command details structure or NULL * * Sets up the link and restarts the Auto-Negotiation over the link. **/ enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, bool enable_link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_link_restart_an *cmd = (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_link_restart_an); cmd->command = I40E_AQ_PHY_RESTART_AN; if (enable_link) cmd->command |= I40E_AQ_PHY_LINK_ENABLE; else cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_link_info * @hw: pointer to the hw struct * @enable_lse: enable/disable LinkStatusEvent reporting * @link: pointer to link status structure - optional * @cmd_details: pointer to command details structure or NULL * * Returns the link status of the adapter. 
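 *
 * Polling sketch (illustrative only):
 *
 *	struct i40e_link_status link;
 *
 *	status = i40e_aq_get_link_info(hw, TRUE, &link, NULL);
 *	if (status == I40E_SUCCESS && (link.link_info & I40E_AQ_LINK_UP))
 *		...	// link is up; link.link_speed holds the speed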
**/ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_link_status *resp = (struct i40e_aqc_get_link_status *)&desc.params.raw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; enum i40e_status_code status; bool tx_pause, rx_pause; u16 command_flags; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); if (enable_lse) command_flags = I40E_AQ_LSE_ENABLE; else command_flags = I40E_AQ_LSE_DISABLE; resp->command_flags = CPU_TO_LE16(command_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_link_info_exit; /* save off old link status information */ i40e_memcpy(&hw->phy.link_info_old, hw_link_info, sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; hw->phy.media_type = i40e_get_media_type(hw); hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; hw_link_info->link_info = resp->link_info; hw_link_info->an_info = resp->an_info; hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | I40E_AQ_CONFIG_FEC_RS_ENA); hw_link_info->ext_info = resp->ext_info; hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size); hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; /* update fc info */ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); if (tx_pause & rx_pause) hw->fc.current_mode = I40E_FC_FULL; else if (tx_pause) hw->fc.current_mode = I40E_FC_TX_PAUSE; else if (rx_pause) hw->fc.current_mode = I40E_FC_RX_PAUSE; else hw->fc.current_mode = I40E_FC_NONE; if (resp->config & I40E_AQ_CONFIG_CRC_ENA) hw_link_info->crc_enable = TRUE; else hw_link_info->crc_enable = FALSE; if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED)) hw_link_info->lse_enable = TRUE; else hw_link_info->lse_enable = FALSE; if ((hw->mac.type == I40E_MAC_XL710) && (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && hw->mac.type != I40E_MAC_X722) { __le32 tmp; i40e_memcpy(&tmp, resp->link_type, sizeof(tmp), I40E_NONDMA_TO_NONDMA); hw->phy.phy_types = LE32_TO_CPU(tmp); hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); } /* save link status information */ if (link) i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); /* flag cleared so helper functions don't call AQ again */ hw->phy.get_link_info = FALSE; aq_get_link_info_exit: return status; } /** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct * @mask: interrupt mask to be set * @cmd_details: pointer to command details structure or NULL * * Set link interrupt mask. 
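 *
 * Hedged sketch (the I40E_AQ_EVENT_* bits come from i40e_adminq_cmd.h;
 * this particular mask is only an example):
 *
 *	status = i40e_aq_set_phy_int_mask(hw,
 *					  ~(I40E_AQ_EVENT_LINK_UPDOWN |
 *					    I40E_AQ_EVENT_MEDIA_NA), NULL);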
**/ enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_int_mask *cmd = (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_int_mask); cmd->event_mask = CPU_TO_LE16(mask); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_local_advt_reg * @hw: pointer to the hw struct * @advt_reg: local AN advertisement register value * @cmd_details: pointer to command details structure or NULL * * Get the Local AN advertisement register value. **/ enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *resp = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_local_advt_reg); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_local_advt_reg_exit; *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); aq_get_local_advt_reg_exit: return status; } /** * i40e_aq_set_local_advt_reg * @hw: pointer to the hw struct * @advt_reg: local AN advertisement register value * @cmd_details: pointer to command details structure or NULL * * Set the Local AN advertisement register value. **/ enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *cmd = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_local_advt_reg); cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg)); cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg)); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_partner_advt * @hw: pointer to the hw struct * @advt_reg: AN partner advertisement register value * @cmd_details: pointer to command details structure or NULL * * Get the link partner AN advertisement register value. **/ enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *resp = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_partner_advt); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_partner_advt_exit; *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); aq_get_partner_advt_exit: return status; } /** * i40e_aq_set_lb_modes * @hw: pointer to the hw struct * @lb_level: loopback level to be set * @lb_type: loopback type to be set * @speed: loopback speed; a non-zero value forces the speed * @cmd_details: pointer to command details structure or NULL * * Sets loopback modes. 
**/ enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_lb_mode *cmd = (struct i40e_aqc_set_lb_mode *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes); cmd->lb_level = lb_level; cmd->lb_type = lb_type; cmd->speed = speed; if (speed) cmd->force_speed = 1; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_phy_debug * @hw: pointer to the hw struct * @cmd_flags: debug command flags * @cmd_details: pointer to command details structure or NULL * * Reset the external PHY. **/ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_debug *cmd = (struct i40e_aqc_set_phy_debug *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_debug); cmd->command_flags = cmd_flags; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Add a VSI context to the hardware. **/ enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vsi); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid); cmd->connection_type = vsi_ctx->connection_type; cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); if (status != I40E_SUCCESS) goto aq_add_vsi_exit; vsi_ctx->seid = LE16_TO_CPU(resp->seid); vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); aq_add_vsi_exit: return status; } /** * i40e_aq_set_default_vsi * @hw: pointer to the hw struct * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_clear_default_vsi * @hw: pointer to the hw struct * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct 
i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); cmd->promiscuous_flags = CPU_TO_LE16(0); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_unicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set unicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc **/ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details, bool rx_only_promisc) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; if (rx_only_promisc && (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || (hw->aq.api_maj_ver > 1))) flags |= I40E_AQC_SET_VSI_PROMISC_TX; } cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || (hw->aq.api_maj_ver > 1)) cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_multicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set multicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_full_promiscuous * @hw: pointer to the hw struct * @seid: VSI number * @set: set promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags = I40E_AQC_SET_VSI_PROMISC_UNICAST | I40E_AQC_SET_VSI_PROMISC_MULTICAST | I40E_AQC_SET_VSI_PROMISC_BROADCAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST | I40E_AQC_SET_VSI_PROMISC_MULTICAST | I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = 
CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_mc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = CPU_TO_LE16(seid); cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_uc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); cmd->seid = CPU_TO_LE16(seid); cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_bc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set broadcast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = CPU_TO_LE16(seid); cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_broadcast * @hw: pointer to the hw struct * @seid: vsi number * @set_filter: TRUE to set filter, FALSE to clear filter * @cmd_details: pointer to command details structure or NULL * * Set or clear the 
broadcast promiscuous flag (filter) for a given VSI. **/ enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 seid, bool set_filter, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set_filter) cmd->promiscuous_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); else cmd->promiscuous_flags &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting * @hw: pointer to the hw struct * @seid: vsi number * @enable: set VLAN promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, u16 seid, bool enable, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_vsi_parameters); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), NULL); if (status != I40E_SUCCESS) goto aq_get_vsi_params_exit; vsi_ctx->seid = LE16_TO_CPU(resp->seid); vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); aq_get_vsi_params_exit: return status; } /** * i40e_aq_update_vsi_params * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Update a VSI context. 
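 *
 * Read-modify-write sketch (illustrative; "vsi_seid" is an assumption):
 *
 *	struct i40e_vsi_context ctx = { 0 };
 *
 *	ctx.seid = vsi_seid;
 *	status = i40e_aq_get_vsi_params(hw, &ctx, NULL);
 *	if (status == I40E_SUCCESS) {
 *		...	// adjust ctx.info fields here
 *		status = i40e_aq_update_vsi_params(hw, &ctx, NULL);
 *	}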
**/ enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_vsi_parameters); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); return status; } /** * i40e_aq_get_switch_config * @hw: pointer to the hardware structure * @buf: pointer to the result buffer * @buf_size: length of input buffer * @start_seid: seid to start for the report, 0 == beginning * @cmd_details: pointer to command details structure or NULL * * Fill the buf with switch configuration returned from AdminQ command **/ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *scfg = (struct i40e_aqc_switch_seid *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_config); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); scfg->seid = CPU_TO_LE16(*start_seid); status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); *start_seid = LE16_TO_CPU(scfg->seid); return status; } /** * i40e_aq_set_switch_config * @hw: pointer to the hardware structure * @flags: bit flag values to set * @mode: cloud filter mode * @valid_flags: which bit flags to set * @cmd_details: pointer to command details structure or NULL * * Set switch configuration bits **/ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, u16 flags, u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_switch_config *scfg = (struct i40e_aqc_set_switch_config *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_switch_config); scfg->flags = CPU_TO_LE16(flags); scfg->valid_flags = CPU_TO_LE16(valid_flags); scfg->mode = mode; if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { scfg->switch_tag = CPU_TO_LE16(hw->switch_tag); scfg->first_tag = CPU_TO_LE16(hw->first_tag); scfg->second_tag = CPU_TO_LE16(hw->second_tag); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_firmware_version * @hw: pointer to the hw struct * @fw_major_version: firmware major version * @fw_minor_version: firmware minor version * @fw_build: firmware build number * @api_major_version: major queue version * @api_minor_version: minor queue version * @cmd_details: pointer to command details structure or NULL * * Get the firmware version from the admin queue commands **/ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc 
desc; struct i40e_aqc_get_version *resp = (struct i40e_aqc_get_version *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) { if (fw_major_version != NULL) *fw_major_version = LE16_TO_CPU(resp->fw_major); if (fw_minor_version != NULL) *fw_minor_version = LE16_TO_CPU(resp->fw_minor); if (fw_build != NULL) *fw_build = LE32_TO_CPU(resp->fw_build); if (api_major_version != NULL) *api_major_version = LE16_TO_CPU(resp->api_major); if (api_minor_version != NULL) *api_minor_version = LE16_TO_CPU(resp->api_minor); /* A workaround to fix the API version in SW */ if (api_major_version && api_minor_version && fw_major_version && fw_minor_version && ((*api_major_version == 1) && (*api_minor_version == 1)) && (((*fw_major_version == 4) && (*fw_minor_version >= 2)) || (*fw_major_version > 4))) *api_minor_version = 2; } return status; } /** * i40e_aq_send_driver_version * @hw: pointer to the hw struct * @dv: driver's major, minor version * @cmd_details: pointer to command details structure or NULL * * Send the driver version to the firmware **/ enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_driver_version *cmd = (struct i40e_aqc_driver_version *)&desc.params.raw; enum i40e_status_code status; u16 len; if (dv == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; cmd->driver_subbuild_ver = dv->subbuild_version; len = 0; while (len < sizeof(dv->driver_string) && (dv->driver_string[len] < 0x80) && dv->driver_string[len]) len++; status = i40e_asq_send_command(hw, &desc, dv->driver_string, len, cmd_details); return status; } /** * i40e_get_link_status - get status of the HW network link * @hw: pointer to the hw struct * @link_up: pointer to bool (TRUE/FALSE = linkup/linkdown) * * Variable link_up TRUE if link is up, FALSE if link is down. 
* The value of link_up is invalid if the returned status != I40E_SUCCESS * * Side effect: LinkStatusEvent reporting becomes enabled **/ enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up) { enum i40e_status_code status = I40E_SUCCESS; if (hw->phy.get_link_info) { status = i40e_update_link_info(hw); if (status != I40E_SUCCESS) i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", status); } *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; return status; } /** * i40e_update_link_info - update status of the HW network link * @hw: pointer to the hw struct **/ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; enum i40e_status_code status = I40E_SUCCESS; status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL); if (status) return status; /* extra checking needed to ensure link info to user is timely */ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (status) return status; if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_ENABLE_FEC_AUTO) hw->phy.link_info.req_fec_info = (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS); else hw->phy.link_info.req_fec_info = abilities.fec_cfg_curr_mod_ext_info & (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS); i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type, sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA); } return status; } /** * i40e_get_link_speed * @hw: pointer to the hw struct * * Returns the link speed of the adapter. **/ enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw) { enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN; enum i40e_status_code status = I40E_SUCCESS; if (hw->phy.get_link_info) { status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL); if (status != I40E_SUCCESS) goto i40e_link_speed_exit; } speed = hw->phy.link_info.link_speed; i40e_link_speed_exit: return speed; } /** * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC * @hw: pointer to the hw struct * @uplink_seid: the MAC or other gizmo SEID * @downlink_seid: the VSI SEID * @enabled_tc: bitmap of TCs to be enabled * @default_port: TRUE for default port VSI, FALSE for control port * @veb_seid: pointer to where to put the resulting VEB SEID * @enable_stats: TRUE to turn on VEB stats * @cmd_details: pointer to command details structure or NULL * * This asks the FW to add a VEB between the uplink and downlink * elements. If the uplink SEID is 0, this will be a floating VEB.
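 *
 * Illustrative caller sketch (editor's addition, not part of the driver;
 * the local names and the 0x1 TC bitmap are assumptions):
 *
 *	u16 veb_seid = 0;
 *	enum i40e_status_code ret;
 *
 *	ret = i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1, FALSE,
 *	    &veb_seid, TRUE, NULL);
 *
 * On success, veb_seid holds the SEID the FW assigned to the new VEB.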
**/ enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, bool default_port, u16 *veb_seid, bool enable_stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_veb *cmd = (struct i40e_aqc_add_veb *)&desc.params.raw; struct i40e_aqc_add_veb_completion *resp = (struct i40e_aqc_add_veb_completion *)&desc.params.raw; enum i40e_status_code status; u16 veb_flags = 0; /* SEIDs need to either both be set or both be 0 for floating VEB */ if (!!uplink_seid != !!downlink_seid) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); cmd->uplink_seid = CPU_TO_LE16(uplink_seid); cmd->downlink_seid = CPU_TO_LE16(downlink_seid); cmd->enable_tcs = enabled_tc; if (!uplink_seid) veb_flags |= I40E_AQC_ADD_VEB_FLOATING; if (default_port) veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; else veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; /* reverse logic here: set the bitflag to disable the stats */ if (!enable_stats) veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; cmd->veb_flags = CPU_TO_LE16(veb_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && veb_seid) *veb_seid = LE16_TO_CPU(resp->veb_seid); return status; } /** * i40e_aq_get_veb_parameters - Retrieve VEB parameters * @hw: pointer to the hw struct * @veb_seid: the SEID of the VEB to query * @switch_id: the uplink switch id * @floating: set to TRUE if the VEB is floating * @statistic_index: index of the stats counter block for this VEB * @vebs_used: number of VEBs used by the function * @vebs_free: total VEBs not reserved by any function * @cmd_details: pointer to command details structure or NULL * * This retrieves the parameters for a particular VEB, specified by * veb_seid, and returns them to the caller.
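 *
 * Illustrative caller sketch (editor's addition; locals are assumptions,
 * and any output pointer that is not needed may be passed as NULL):
 *
 *	u16 switch_id, stat_idx, vebs_used, vebs_free;
 *	bool floating;
 *
 *	if (i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id, &floating,
 *	    &stat_idx, &vebs_used, &vebs_free, NULL) == I40E_SUCCESS)
 *		...
 *
 * On success the outputs describe the VEB's uplink switch, its stats
 * block index, and the per-function VEB accounting.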
**/ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, u16 *statistic_index, u16 *vebs_used, u16 *vebs_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_veb_parameters_completion *cmd_resp = (struct i40e_aqc_get_veb_parameters_completion *) &desc.params.raw; enum i40e_status_code status; if (veb_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_veb_parameters); cmd_resp->seid = CPU_TO_LE16(veb_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status) goto get_veb_exit; if (switch_id) *switch_id = LE16_TO_CPU(cmd_resp->switch_id); if (statistic_index) *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index); if (vebs_used) *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used); if (vebs_free) *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free); if (floating) { u16 flags = LE16_TO_CPU(cmd_resp->veb_flags); if (flags & I40E_AQC_ADD_VEB_FLOATING) *floating = TRUE; else *floating = FALSE; } get_veb_exit: return status; } /** * i40e_aq_add_macvlan * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be added * @count: length of the list * @cmd_details: pointer to command details structure or NULL * * Add MAC/VLAN addresses to the HW filtering **/ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; int i; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; for (i = 0; i < count; i++) if (I40E_IS_MULTICAST(mv_list[i].mac_addr)) mv_list[i].flags |= CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, cmd_details); return status; } /** * i40e_aq_remove_macvlan * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be removed * @count: length of the list * @cmd_details: pointer to command details structure or NULL * * Remove MAC/VLAN addresses from the HW filtering **/ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = 
i40e_asq_send_command(hw, &desc, mv_list, buf_size, cmd_details); return status; } /** * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule * @hw: pointer to the hw struct * @opcode: AQ opcode for add or delete mirror rule * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @id: Destination VSI SEID or Rule ID * @count: length of the list * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for * VEBs/VEPA elements only **/ static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw, u16 opcode, u16 sw_seid, u16 rule_type, u16 id, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rule_id, u16 *rules_used, u16 *rules_free) { struct i40e_aq_desc desc; struct i40e_aqc_add_delete_mirror_rule *cmd = (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; struct i40e_aqc_add_delete_mirror_rule_completion *resp = (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; buf_size = count * sizeof(*mr_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, opcode); cmd->seid = CPU_TO_LE16(sw_seid); cmd->rule_type = CPU_TO_LE16(rule_type & I40E_AQC_MIRROR_RULE_TYPE_MASK); cmd->num_entries = CPU_TO_LE16(count); /* Dest VSI for add, rule_id for delete */ cmd->destination = CPU_TO_LE16(id); if (mr_list) { desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); } status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, cmd_details); if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { if (rule_id) *rule_id = LE16_TO_CPU(resp->rule_id); if (rules_used) *rules_used = LE16_TO_CPU(resp->mirror_rules_used); if (rules_free) *rules_free = LE16_TO_CPU(resp->mirror_rules_free); } return status; } /** * i40e_aq_add_mirrorrule - add a mirror rule * @hw: pointer to the hw struct * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @dest_vsi: SEID of VSI to which packets will be mirrored * @count: length of the list * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Add mirror rule. 
Mirror rules are supported for VEBs or VEPA elements only. **/ enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rule_id, u16 *rules_used, u16 *rules_free) { if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { if (count == 0 || !mr_list) return I40E_ERR_PARAM; } return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, rule_type, dest_vsi, count, mr_list, cmd_details, rule_id, rules_used, rules_free); } /** * i40e_aq_delete_mirrorrule - delete a mirror rule * @hw: pointer to the hw struct * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @count: length of the list * @rule_id: Rule ID that is returned in the receive desc as part of * add_mirrorrule. * @mr_list: list of mirrored VLAN IDs to be removed * @cmd_details: pointer to command details structure or NULL * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only **/ enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rules_used, u16 *rules_free) { /* The rule ID must be valid, except for rule_type INGRESS VLAN mirroring */ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { /* count and mr_list shall be valid for rule_type INGRESS VLAN * mirroring. For other rule_type values, count and mr_list are * not used. */ if (count == 0 || !mr_list) return I40E_ERR_PARAM; } return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, rule_type, rule_id, count, mr_list, cmd_details, NULL, rules_used, rules_free); } /** * i40e_aq_add_vlan - Add VLAN ids to the HW filtering * @hw: pointer to the hw struct * @seid: VSI for the vlan filters * @v_list: list of vlan filters to be added * @count: length of the list * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !v_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*v_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, v_list, buf_size, cmd_details); return status; } /** * i40e_aq_remove_vlan - Remove VLANs from the HW filtering * @hw: pointer to the hw struct * @seid: VSI for the vlan filters * @v_list: list of VLAN filters to be removed * @count: length of the list * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct
i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !v_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*v_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, v_list, buf_size, cmd_details); return status; } /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: vf id to send msg * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details * * send msg to vf **/ enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_pf_vf_message *cmd = (struct i40e_aqc_pf_vf_message *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); cmd->id = CPU_TO_LE32(vfid); desc.cookie_high = CPU_TO_LE32(v_opcode); desc.cookie_low = CPU_TO_LE32(v_retval); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); if (msglen) { desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(msglen); } status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); return status; } /** * i40e_aq_debug_read_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Read the register using the admin queue commands **/ enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd_resp = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; enum i40e_status_code status; if (reg_val == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); cmd_resp->address = CPU_TO_LE32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) { *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) | (u64)LE32_TO_CPU(cmd_resp->value_low); } return status; } /** * i40e_aq_debug_write_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Write to a register using the admin queue commands **/ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, u32 reg_addr, u64 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); cmd->address = CPU_TO_LE32(reg_addr); cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32)); cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF)); status = i40e_asq_send_command(hw, &desc, 
NULL, 0, cmd_details); return status; } /** * i40e_aq_request_resource * @hw: pointer to the hw struct * @resource: resource id * @access: access type * @sdp_number: resource number * @timeout: the maximum time in ms that the driver may hold the resource * @cmd_details: pointer to command details structure or NULL * * requests common resource using the admin queue commands **/ enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, enum i40e_aq_resource_access_type access, u8 sdp_number, u64 *timeout, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd_resp = (struct i40e_aqc_request_resource *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_request_resource"); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); cmd_resp->resource_id = CPU_TO_LE16(resource); cmd_resp->access_type = CPU_TO_LE16(access); cmd_resp->resource_number = CPU_TO_LE32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. * If the resource is held by someone else, the command completes with * busy return value and the timeout field indicates the maximum time * the current owner of the resource has to free it. */ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) *timeout = LE32_TO_CPU(cmd_resp->timeout); return status; } /** * i40e_aq_release_resource * @hw: pointer to the hw struct * @resource: resource id * @sdp_number: resource number * @cmd_details: pointer to command details structure or NULL * * release common resource using the admin queue commands **/ enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, u8 sdp_number, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd = (struct i40e_aqc_request_resource *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_release_resource"); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); cmd->resource_id = CPU_TO_LE16(resource); cmd->resource_number = CPU_TO_LE32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_read_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be read (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Read the NVM using the admin queue commands **/ enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_read_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_read_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); /* If this is the last command in a series, set the proper flag. 
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); i40e_aq_read_nvm_exit: return status; } /** * i40e_aq_read_nvm_config - read an nvm config block * @hw: pointer to the hw struct * @cmd_flags: NVM access admin command bits * @field_id: field or feature id * @data: buffer for result * @buf_size: buffer size * @element_count: pointer to count of elements read by FW * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, u8 cmd_flags, u32 field_id, void *data, u16 buf_size, u16 *element_count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_config_read *cmd = (struct i40e_aqc_nvm_config_read *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->cmd_flags = CPU_TO_LE16(cmd_flags); cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id)); if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK) cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16)); else cmd->element_id_msw = 0; status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); if (!status && element_count) *element_count = LE16_TO_CPU(cmd->element_count); return status; } /** * i40e_aq_write_nvm_config - write an nvm config block * @hw: pointer to the hw struct * @cmd_flags: NVM access admin command bits * @data: buffer holding the elements to be written * @buf_size: buffer size * @element_count: count of elements to be written * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, u8 cmd_flags, void *data, u16 buf_size, u16 element_count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_config_write *cmd = (struct i40e_aqc_nvm_config_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->element_count = CPU_TO_LE16(element_count); cmd->cmd_flags = CPU_TO_LE16(cmd_flags); status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); return status; } /** * i40e_aq_oem_post_update - triggers an OEM specific flow after update * @hw: pointer to the hw struct * @buff: buffer for result (currently unused) * @buff_size: buffer size (currently unused) * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH) status = I40E_ERR_NOT_IMPLEMENTED; return status; } /** * i40e_aq_erase_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in the
module (expressed in 4 KB from module's beginning) * @length: length of the section to be erased (expressed in 4 KB) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Erase the NVM sector using the admin queue commands **/ enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_erase_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_erase_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); i40e_aq_erase_nvm_exit: return status; } /** * i40e_parse_discover_capabilities * @hw: pointer to the hw struct * @buff: pointer to a buffer containing device/function capability records * @cap_count: number of capability records in the list * @list_type_opc: type of capabilities list to parse * * Parse the device/function capabilities list. **/ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 cap_count, enum i40e_admin_queue_opc list_type_opc) { struct i40e_aqc_list_capabilities_element_resp *cap; u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; enum i40e_status_code status; u16 id, ocp_cfg_word0; u8 major_rev; u32 i = 0; cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) p = (struct i40e_hw_capabilities *)&hw->dev_caps; else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) p = (struct i40e_hw_capabilities *)&hw->func_caps; else return; for (i = 0; i < cap_count; i++, cap++) { id = LE16_TO_CPU(cap->id); number = LE32_TO_CPU(cap->number); logical_id = LE32_TO_CPU(cap->logical_id); phys_id = LE32_TO_CPU(cap->phys_id); major_rev = cap->major_rev; switch (id) { case I40E_AQ_CAP_ID_SWITCH_MODE: p->switch_mode = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Switch mode = %d\n", p->switch_mode); break; case I40E_AQ_CAP_ID_MNG_MODE: p->management_mode = number; if (major_rev > 1) { p->mng_protocols_over_mctp = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Protocols over MCTP = %d\n", p->mng_protocols_over_mctp); } else { p->mng_protocols_over_mctp = 0; } i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Management Mode = %d\n", p->management_mode); break; case I40E_AQ_CAP_ID_NPAR_ACTIVE: p->npar_enable = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: NPAR enable = %d\n", p->npar_enable); break; case I40E_AQ_CAP_ID_OS2BMC_CAP: p->os2bmc = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: OS2BMC = %d\n", p->os2bmc); break; case I40E_AQ_CAP_ID_FUNCTIONS_VALID: p->valid_functions = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Valid Functions = %d\n", p->valid_functions); break; case I40E_AQ_CAP_ID_SRIOV: if (number == 1) p->sr_iov_1_1 = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: SR-IOV = %d\n", p->sr_iov_1_1); break; case I40E_AQ_CAP_ID_VF: p->num_vfs = number; 
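/* logical_id carries the base (first) VF ID assigned to this PF */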
p->vf_base_id = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: VF count = %d\n", p->num_vfs); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: VF base_id = %d\n", p->vf_base_id); break; case I40E_AQ_CAP_ID_VMDQ: if (number == 1) p->vmdq = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: VMDQ = %d\n", p->vmdq); break; case I40E_AQ_CAP_ID_8021QBG: if (number == 1) p->evb_802_1_qbg = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: 802.1Qbg = %d\n", number); break; case I40E_AQ_CAP_ID_8021QBR: if (number == 1) p->evb_802_1_qbh = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: 802.1Qbh = %d\n", number); break; case I40E_AQ_CAP_ID_VSI: p->num_vsis = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: VSI count = %d\n", p->num_vsis); break; case I40E_AQ_CAP_ID_DCB: if (number == 1) { p->dcb = TRUE; p->enabled_tcmap = logical_id; p->maxtc = phys_id; } i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: DCB = %d\n", p->dcb); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: TC Mapping = %d\n", logical_id); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: TC Max = %d\n", p->maxtc); break; case I40E_AQ_CAP_ID_FCOE: if (number == 1) p->fcoe = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: FCOE = %d\n", p->fcoe); break; case I40E_AQ_CAP_ID_ISCSI: if (number == 1) p->iscsi = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: iSCSI = %d\n", p->iscsi); break; case I40E_AQ_CAP_ID_RSS: p->rss = TRUE; p->rss_table_size = number; p->rss_table_entry_width = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: RSS = %d\n", p->rss); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: RSS table size = %d\n", p->rss_table_size); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: RSS table width = %d\n", p->rss_table_entry_width); break; case I40E_AQ_CAP_ID_RXQ: p->num_rx_qp = number; p->base_queue = phys_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Rx QP = %d\n", number); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: base_queue = %d\n", p->base_queue); break; case I40E_AQ_CAP_ID_TXQ: p->num_tx_qp = number; p->base_queue = phys_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Tx QP = %d\n", number); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: base_queue = %d\n", p->base_queue); break; case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MSIX vector count = %d\n", p->num_msix_vectors); break; case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MSIX VF vector count = %d\n", p->num_msix_vectors_vf); break; case I40E_AQ_CAP_ID_FLEX10: if (major_rev == 1) { if (number == 1) { p->flex10_enable = TRUE; p->flex10_capable = TRUE; } } else { /* Capability revision >= 2 */ if (number & 1) p->flex10_enable = TRUE; if (number & 2) p->flex10_capable = TRUE; } p->flex10_mode = logical_id; p->flex10_status = phys_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Flex10 mode = %d\n", p->flex10_mode); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Flex10 status = %d\n", p->flex10_status); break; case I40E_AQ_CAP_ID_CEM: if (number == 1) p->mgmt_cem = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: CEM = %d\n", p->mgmt_cem); break; case I40E_AQ_CAP_ID_IWARP: if (number == 1) p->iwarp = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: iWARP = %d\n", p->iwarp); break; case I40E_AQ_CAP_ID_LED: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->led[phys_id] = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: LED - PIN %d\n", phys_id); break; case I40E_AQ_CAP_ID_SDP: if 
(phys_id < I40E_HW_CAP_MAX_GPIO) p->sdp[phys_id] = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: SDP - PIN %d\n", phys_id); break; case I40E_AQ_CAP_ID_MDIO: if (number == 1) { p->mdio_port_num = phys_id; p->mdio_port_mode = logical_id; } i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MDIO port number = %d\n", p->mdio_port_num); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MDIO port mode = %d\n", p->mdio_port_mode); break; case I40E_AQ_CAP_ID_1588: if (number == 1) p->ieee_1588 = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: IEEE 1588 = %d\n", p->ieee_1588); break; case I40E_AQ_CAP_ID_FLOW_DIRECTOR: p->fd = TRUE; p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Flow Director = 1\n"); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Guaranteed FD filters = %d\n", p->fd_filters_guaranteed); break; case I40E_AQ_CAP_ID_WSR_PROT: p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: wr_csr_prot = 0x%llX\n\n", (p->wr_csr_prot & 0xffff)); break; case I40E_AQ_CAP_ID_NVM_MGMT: if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) p->sec_rev_disabled = TRUE; if (number & I40E_NVM_MGMT_UPDATE_DISABLED) p->update_disabled = TRUE; break; case I40E_AQ_CAP_ID_WOL_AND_PROXY: hw->num_wol_proxy_filters = (u16)number; hw->wol_proxy_vsi_seid = (u16)logical_id; p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK; if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK) p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK; else p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL; p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: WOL proxy filters = %d\n", hw->num_wol_proxy_filters); break; default: break; } } if (p->fcoe) i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */ p->fcoe = FALSE; /* count the enabled ports (aka the "not disabled" ports) */ hw->num_ports = 0; for (i = 0; i < 4; i++) { u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); u64 port_cfg = 0; /* use AQ read to get the physical register offset instead * of the port relative offset */ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) hw->num_ports++; } /* OCP cards case: if a mezz is removed the ethernet port is at * disabled state in PRTGEN_CNF register. Additional NVM read is * needed in order to check if we are dealing with OCP card. * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting * physical ports results in wrong partition id calculation and thus * not supporting WoL. 
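 * For such cards the port count is corrected below: on X722, if the OCP
 * config word read from the NVM has I40E_SR_OCP_ENABLED set, num_ports
 * is forced to 4.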
*/ if (hw->mac.type == I40E_MAC_X722) { if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) { status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 2 * I40E_SR_OCP_CFG_WORD0, sizeof(ocp_cfg_word0), &ocp_cfg_word0, TRUE, NULL); if (status == I40E_SUCCESS && (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) hw->num_ports = 4; i40e_release_nvm(hw); } } valid_functions = p->valid_functions; num_functions = 0; while (valid_functions) { if (valid_functions & 1) num_functions++; valid_functions >>= 1; } /* partition id is 1-based, and functions are evenly spread * across the ports as partitions */ if (hw->num_ports != 0) { hw->partition_id = (hw->pf_id / hw->num_ports) + 1; hw->num_partitions = num_functions / hw->num_ports; } /* additional HW specific goodies that might * someday be HW version specific */ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; } /** * i40e_aq_discover_capabilities * @hw: pointer to the hw struct * @buff: a virtual buffer to hold the capabilities * @buff_size: Size of the virtual buffer * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM * @list_type_opc: capabilities type to discover - pass in the command opcode * @cmd_details: pointer to command details structure or NULL * * Get the device capabilities descriptions from the firmware **/ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, void *buff, u16 buff_size, u16 *data_size, enum i40e_admin_queue_opc list_type_opc, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_list_capabilites *cmd; struct i40e_aq_desc desc; enum i40e_status_code status = I40E_SUCCESS; cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; if (list_type_opc != i40e_aqc_opc_list_func_capabilities && list_type_opc != i40e_aqc_opc_list_dev_capabilities) { status = I40E_ERR_PARAM; goto exit; } i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); *data_size = LE16_TO_CPU(desc.datalen); if (status) goto exit; i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count), list_type_opc); exit: return status; } /** * i40e_aq_update_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be written (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @preservation_flags: Preservation mode flags * @cmd_details: pointer to command details structure or NULL * * Update the NVM using the admin queue commands **/ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_update_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_update_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); /* If this is the last command in a series, set the proper flag. 
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; if (hw->mac.type == I40E_MAC_X722) { if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) cmd->command_flags |= (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) cmd->command_flags |= (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); } cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); i40e_aq_update_nvm_exit: return status; } /** * i40e_aq_nvm_progress * @hw: pointer to the hw struct * @progress: pointer to progress returned from AQ * @cmd_details: pointer to command details structure or NULL * * Gets progress of flash rearrangement process **/ enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, struct i40e_asq_cmd_details *cmd_details) { enum i40e_status_code status; struct i40e_aq_desc desc; DEBUGFUNC("i40e_aq_nvm_progress"); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); *progress = desc.params.raw[0]; return status; } /** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct * @bridge_type: type of bridge requested * @mib_type: Local, Remote or both Local and Remote MIBs * @buff: pointer to a user supplied buffer to store the MIB block * @buff_size: size of the buffer (in bytes) * @local_len: length of the returned Local LLDP MIB * @remote_len: length of the returned Remote LLDP MIB * @cmd_details: pointer to command details structure or NULL * * Requests the complete LLDP MIB (entire packet). **/ enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_get_mib *cmd = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; struct i40e_aqc_lldp_get_mib *resp = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); desc.datalen = CPU_TO_LE16(buff_size); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (local_len != NULL) *local_len = LE16_TO_CPU(resp->local_len); if (remote_len != NULL) *remote_len = LE16_TO_CPU(resp->remote_len); } return status; } /** * i40e_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the hw struct * @mib_type: Local, Remote or both Local and Remote MIBs * @buff: pointer to a user supplied buffer containing the MIB block to apply * @buff_size: size of the buffer (in bytes) * @cmd_details: pointer to command details structure or NULL * * Set the LLDP MIB.
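 *
 * Illustrative caller sketch (editor's addition; the mib buffer, its
 * length, and the mib_type value 0 for the local MIB are assumptions,
 * check the set_local_mib definitions in i40e_adminq_cmd.h):
 *
 *	u8 mib[I40E_LLDPDU_SIZE];	(filled with the MIB TLVs to apply)
 *	u16 mib_len;			(length of the TLV data in mib)
 *
 *	status = i40e_aq_set_lldp_mib(hw, 0, mib, mib_len, NULL);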
**/ enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, u8 mib_type, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_set_local_mib *cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_set_local_mib); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->type = mib_type; cmd->length = CPU_TO_LE16(buff_size); cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buff)); cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff)); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_cfg_lldp_mib_change_event * @hw: pointer to the hw struct * @enable_update: Enable or Disable event posting * @cmd_details: pointer to command details structure or NULL * * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes **/ enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_update_mib *cmd = (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); if (!enable_update) cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_restore_lldp * @hw: pointer to the hw struct * @setting: pointer to factory setting variable or NULL * @restore: True if factory settings should be restored * @cmd_details: pointer to command details structure or NULL * * Restore LLDP Agent factory settings if @restore is set to TRUE. Otherwise, * only returns the factory setting in the AQ response.
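 *
 * Illustrative caller sketch (editor's addition): query the factory
 * default without restoring anything by passing restore == FALSE:
 *
 *	u8 factory_setting;
 *
 *	if (i40e_aq_restore_lldp(hw, &factory_setting, FALSE, NULL) ==
 *	    I40E_SUCCESS)
 *		...
 *
 * Bit 0 of factory_setting then reflects the factory default.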
**/ enum i40e_status_code i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_restore *cmd = (struct i40e_aqc_lldp_restore *)&desc.params.raw; enum i40e_status_code status; if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { i40e_debug(hw, I40E_DEBUG_ALL, "Restore LLDP not supported by current FW version.\n"); return I40E_ERR_DEVICE_NOT_SUPPORTED; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); if (restore) cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (setting) *setting = cmd->command & 1; return status; } /** * i40e_aq_stop_lldp * @hw: pointer to the hw struct * @shutdown_agent: True if LLDP Agent needs to be Shutdown * @persist: True if stop of LLDP should be persistent across power cycles * @cmd_details: pointer to command details structure or NULL * * Stop or Shutdown the embedded LLDP Agent **/ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, bool persist, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop *cmd = (struct i40e_aqc_lldp_stop *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); if (shutdown_agent) cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; if (persist) { if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; else i40e_debug(hw, I40E_DEBUG_ALL, "Persistent Stop LLDP not supported by current FW version.\n"); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_start_lldp * @hw: pointer to the hw struct * @persist: True if start of LLDP should be persistent across power cycles * @cmd_details: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. 
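 *
 * Illustrative caller sketch (editor's addition): start the agent and ask
 * for the start to persist across power cycles where the FW supports it:
 *
 *	enum i40e_status_code ret;
 *
 *	ret = i40e_aq_start_lldp(hw, TRUE, NULL);
 *
 * On FW without persistent LLDP support the call still starts the agent;
 * it only logs that the persistent form is unavailable.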
**/ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_start *cmd = (struct i40e_aqc_lldp_start *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); cmd->command = I40E_AQ_LLDP_AGENT_START; if (persist) { if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; else i40e_debug(hw, I40E_DEBUG_ALL, "Persistent Start LLDP not supported by current FW version.\n"); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_dcb_parameters * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * @dcb_enable: True if DCB configuration needs to be applied * **/ enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_dcb_parameters *cmd = (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; enum i40e_status_code status; if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) return I40E_ERR_DEVICE_NOT_SUPPORTED; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_dcb_parameters); if (dcb_enable) { cmd->valid_flags = I40E_DCB_VALID; cmd->command = I40E_AQ_DCB_SET_AGENT; } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_cee_dcb_config * @hw: pointer to the hw struct * @buff: response buffer that stores CEE operational configuration * @buff_size: size of the buffer passed * @cmd_details: pointer to command details structure or NULL * * Get CEE DCBX mode operational configuration from firmware **/ enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, cmd_details); return status; } /** * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW * @hw: pointer to the hw struct * @start_agent: True if DCBx Agent needs to be Started * False if DCBx Agent needs to be Stopped * @cmd_details: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent **/ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, bool start_agent, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop_start_specific_agent *cmd = (struct i40e_aqc_lldp_stop_start_specific_agent *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop_start_spec_agent); if (start_agent) cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add in Host byte order * @protocol_index: protocol index type * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL * * Note: Firmware expects the udp_port value to be in Little Endian format, * and this function will call CPU_TO_LE16 to convert from Host byte order to * Little Endian order. 
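 *
 * Illustrative caller sketch (editor's addition; 4789 is the IANA VXLAN
 * port, and using I40E_AQC_TUNNEL_TYPE_VXLAN as the protocol index is an
 * assumption, check the tunnel type definitions):
 *
 *	u8 filter_index;
 *
 *	if (i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *	    &filter_index, NULL) == I40E_SUCCESS)
 *		...
 *
 * On success, filter_index identifies the filter for a later
 * i40e_aq_del_udp_tunnel() call.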
**/ enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_udp_tunnel *cmd = (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; struct i40e_aqc_del_udp_tunnel_completion *resp = (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); cmd->udp_port = CPU_TO_LE16(udp_port); cmd->protocol_type = protocol_index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && filter_index) *filter_index = resp->index; return status; } /** * i40e_aq_del_udp_tunnel * @hw: pointer to the hw struct * @index: filter index * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_udp_tunnel *cmd = (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); cmd->index = index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_switch_resource_alloc (0x0204) * @hw: pointer to the hw struct * @num_entries: pointer to u8 to store the number of resource entries returned * @buf: pointer to a user supplied buffer. This buffer must be large enough * to store the resource information for all resource types. Each * resource type is an i40e_aqc_switch_resource_alloc_data structure. * @count: number of resource entries the buffer can hold * @cmd_details: pointer to command details structure or NULL * * Query the resources allocated to a function. **/ enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, u8 *num_entries, struct i40e_aqc_switch_resource_alloc_element_resp *buf, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_switch_resource_alloc *cmd_resp = (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw; enum i40e_status_code status; u16 length = count * sizeof(*buf); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_resource_alloc); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); if (!status && num_entries) *num_entries = cmd_resp->num_entries; return status; } /** * i40e_aq_delete_element - Delete switch element * @hw: pointer to the hw struct * @seid: the SEID to delete from the switch * @cmd_details: pointer to command details structure or NULL * * This deletes a switch element from the switch.
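 *
 * Illustrative caller sketch (editor's addition; vsi_seid is an
 * assumption for a previously added element):
 *
 *	enum i40e_status_code ret;
 *
 *	ret = i40e_aq_delete_element(hw, vsi_seid, NULL);
 *	if (ret != I40E_SUCCESS)
 *		i40e_debug(hw, I40E_DEBUG_ALL, "delete failed: %d\n", ret);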
**/ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *cmd = (struct i40e_aqc_switch_seid *)&desc.params.raw; enum i40e_status_code status; if (seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port * @hw: pointer to the hw struct * @flags: component flags * @mac_seid: uplink seid (MAC SEID) * @vsi_seid: connected vsi seid * @ret_seid: SEID of the created PV component * * This instantiates an i40e port virtualizer with specified flags. * Depending on specified flags the port virtualizer can act as a * 802.1Qbr port virtualizer or a 802.1Qbg S-component. */ enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, u16 mac_seid, u16 vsi_seid, u16 *ret_seid) { struct i40e_aq_desc desc; struct i40e_aqc_add_update_pv *cmd = (struct i40e_aqc_add_update_pv *)&desc.params.raw; struct i40e_aqc_add_update_pv_completion *resp = (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv); cmd->command_flags = CPU_TO_LE16(flags); cmd->uplink_seid = CPU_TO_LE16(mac_seid); cmd->connected_seid = CPU_TO_LE16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (!status && ret_seid) *ret_seid = LE16_TO_CPU(resp->pv_seid); return status; } /** * i40e_aq_add_tag - Add an S/E-tag * @hw: pointer to the hw struct * @direct_to_queue: whether the S-tag should direct the flow to a specific queue * @vsi_seid: VSI SEID to use this tag * @tag: value of the tag * @queue_num: queue number, only valid if direct_to_queue is TRUE * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This associates an S- or E-tag to a VSI in the switch complex. It returns * the number of tags allocated by the PF, and the number of unallocated * tags available.
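 *
 * Illustrative caller sketch (editor's addition; the tag value 100 and
 * queue 3 are assumptions):
 *
 *	u16 tags_used, tags_free;
 *
 *	if (i40e_aq_add_tag(hw, TRUE, vsi_seid, 100, 3,
 *	    &tags_used, &tags_free, NULL) == I40E_SUCCESS)
 *		...
 *
 * On success, tags_used and tags_free report the PF's tag accounting.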
**/ enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, u16 vsi_seid, u16 tag, u16 queue_num, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_tag *cmd = (struct i40e_aqc_add_tag *)&desc.params.raw; struct i40e_aqc_add_remove_tag_completion *resp = (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->tag = CPU_TO_LE16(tag); if (direct_to_queue) { cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE); cmd->queue_number = CPU_TO_LE16(queue_num); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_remove_tag - Remove an S- or E-tag * @hw: pointer to the hw struct * @vsi_seid: VSI SEID this tag is associated with * @tag: value of the S-tag to delete * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This deletes an S- or E-tag from a VSI in the switch complex. It returns * the number of tags allocated by the PF, and the number of unallocated * tags available. **/ enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, u16 tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_tag *cmd = (struct i40e_aqc_remove_tag *)&desc.params.raw; struct i40e_aqc_add_remove_tag_completion *resp = (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->tag = CPU_TO_LE16(tag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_add_mcast_etag - Add a multicast E-tag * @hw: pointer to the hw struct * @pv_seid: Port Virtualizer of this SEID to associate E-tag with * @etag: value of E-tag to add * @num_tags_in_buf: number of unicast E-tags in indirect buffer * @buf: address of indirect buffer * @tags_used: return value, number of E-tags in use by this port * @tags_free: return value, number of unallocated M-tags * @cmd_details: pointer to command details structure or NULL * * This associates a multicast E-tag to a port virtualizer. It will return * the number of tags allocated by the PF, and the number of unallocated * tags available. * * The indirect buffer pointed to by buf is a list of 2-byte E-tags, * num_tags_in_buf long. 
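 *
 * Illustrative caller sketch (editor's addition; the E-tag values are
 * assumptions, and storing the entries as __le16 with CPU_TO_LE16() is
 * an assumption based on the 2-byte little-endian wire format):
 *
 *	__le16 etags[2];
 *
 *	etags[0] = CPU_TO_LE16(0x123);
 *	etags[1] = CPU_TO_LE16(0x124);
 *	status = i40e_aq_add_mcast_etag(hw, pv_seid, mcast_etag, 2, etags,
 *	    NULL, NULL, NULL);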
**/ enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid, u16 etag, u8 num_tags_in_buf, void *buf, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_mcast_etag *cmd = (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; struct i40e_aqc_add_remove_mcast_etag_completion *resp = (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; enum i40e_status_code status; u16 length = sizeof(u16) * num_tags_in_buf; if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0)) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_multicast_etag); cmd->pv_seid = CPU_TO_LE16(pv_seid); cmd->etag = CPU_TO_LE16(etag); cmd->num_unicast_etags = num_tags_in_buf; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->mcast_etags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->mcast_etags_free); } return status; } /** * i40e_aq_remove_mcast_etag - Remove a multicast E-tag * @hw: pointer to the hw struct * @pv_seid: Port Virtualizer SEID this M-tag is associated with * @etag: value of the E-tag to remove * @tags_used: return value, number of tags in use by this port * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This deletes an E-tag from the port virtualizer. It will return * the number of tags allocated by the port, and the number of unallocated * tags available. **/ enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid, u16 etag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_mcast_etag *cmd = (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; struct i40e_aqc_add_remove_mcast_etag_completion *resp = (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; enum i40e_status_code status; if (pv_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_multicast_etag); cmd->pv_seid = CPU_TO_LE16(pv_seid); cmd->etag = CPU_TO_LE16(etag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->mcast_etags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->mcast_etags_free); } return status; } /** * i40e_aq_update_tag - Update an S/E-tag * @hw: pointer to the hw struct * @vsi_seid: VSI SEID using this S-tag * @old_tag: old tag value * @new_tag: new tag value * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This updates the value of the tag currently attached to this VSI * in the switch complex. It will return the number of tags allocated * by the PF, and the number of unallocated tags available. 
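 *
 * Example (illustrative sketch only; tag values are hypothetical):
 *
 *	status = i40e_aq_update_tag(hw, vsi_seid, 0x100, 0x200,
 *				    &tags_used, &tags_free, NULL);
 *
 * This swaps tag 0x100 for 0x200 on the VSI in a single admin queue
 * command.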
**/ enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, u16 old_tag, u16 new_tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_update_tag *cmd = (struct i40e_aqc_update_tag *)&desc.params.raw; struct i40e_aqc_update_tag_completion *resp = (struct i40e_aqc_update_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->old_tag = CPU_TO_LE16(old_tag); cmd->new_tag = CPU_TO_LE16(new_tag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs * @hw: pointer to the hw struct * @tcmap: TC map for request/release any ignore PFC condition * @request: request or release ignore PFC condition * @tcmap_ret: return TCs for which PFC is currently ignored * @cmd_details: pointer to command details structure or NULL * * This sends out request/release to ignore PFC condition for a TC. * It will return the TCs for which PFC is currently ignored. **/ enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_pfc_ignore *cmd_resp = (struct i40e_aqc_pfc_ignore *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc); if (request) cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET; cmd_resp->tc_bitmap = tcmap; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tcmap_ret != NULL) *tcmap_ret = cmd_resp->tc_bitmap; } return status; } /** * i40e_aq_dcb_updated - DCB Updated Command * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * When LLDP is handled in PF this command is used by the PF * to notify EMP that a DCB setting is modified. * When LLDP is handled in EMP this command is used by the PF * to notify EMP whenever one of the following parameters gets * modified: * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA * - PCIRTT in PRTDCB_GENC.PCIRTT * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME. * EMP will return when the shared RPB settings have been * recomputed and modified. The retval field in the descriptor * will be set to 0 when RPB is modified. **/ enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch. * @hw: pointer to the hw struct * @seid: defines the SEID of the switch for which the stats are requested * @vlan_id: the VLAN ID for which the statistics are requested * @stat_index: index of the statistics counters block assigned to this VLAN * @cmd_details: pointer to command details structure or NULL * * XL710 supports 128 smonVlanStats counters. This command is used to * allocate a set of smonVlanStats counters to a specific VLAN in a specific * switch.
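 *
 * Example (illustrative sketch only; assumes a switch SEID and VLAN ID
 * supplied by the caller):
 *
 *	u16 stat_idx;
 *
 *	status = i40e_aq_add_statistics(hw, sw_seid, vlan_id, &stat_idx,
 *					NULL);
 *
 * On success, stat_idx identifies the allocated counter block and can
 * later be passed back to i40e_aq_remove_statistics().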
**/ enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 *stat_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_statistics *cmd_resp = (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; enum i40e_status_code status; if ((seid == 0) || (stat_index == NULL)) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics); cmd_resp->seid = CPU_TO_LE16(seid); cmd_resp->vlan = CPU_TO_LE16(vlan_id); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stat_index) *stat_index = LE16_TO_CPU(cmd_resp->stat_index); return status; } /** * i40e_aq_remove_statistics - Remove a statistics block from a VLAN in a switch. * @hw: pointer to the hw struct * @seid: defines the SEID of the switch for which the stats are requested * @vlan_id: the VLAN ID for which the statistics are requested * @stat_index: index of the statistics counters block assigned to this VLAN * @cmd_details: pointer to command details structure or NULL * * XL710 supports 128 smonVlanStats counters. This command is used to * deallocate a set of smonVlanStats counters from a specific VLAN in a specific * switch. **/ enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 stat_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_statistics *cmd = (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; enum i40e_status_code status; if (seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_statistics); cmd->seid = CPU_TO_LE16(seid); cmd->vlan = CPU_TO_LE16(vlan_id); cmd->stat_index = CPU_TO_LE16(stat_index); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_port_parameters - set physical port parameters.
* @hw: pointer to the hw struct * @bad_frame_vsi: defines the VSI to which bad frames are forwarded * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded * @double_vlan: if set double VLAN is enabled * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_set_port_parameters *cmd; enum i40e_status_code status; struct i40e_aq_desc desc; u16 command_flags = 0; cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_port_parameters); cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); if (save_bad_pac) command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS; if (pad_short_pac) command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS; if (double_vlan) command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA; cmd->command_flags = CPU_TO_LE16(command_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler * @hw: pointer to the hw struct * @seid: seid for the physical port/switching component/vsi * @buff: Indirect buffer to hold data parameters and response * @buff_size: Indirect buffer size * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL * * Generic command handler for Tx scheduler AQ commands **/ static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, void *buff, u16 buff_size, enum i40e_admin_queue_opc opcode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_tx_sched_ind *cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; enum i40e_status_code status; bool cmd_param_flag = FALSE; switch (opcode) { case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: case i40e_aqc_opc_configure_vsi_tc_bw: case i40e_aqc_opc_enable_switching_comp_ets: case i40e_aqc_opc_modify_switching_comp_ets: case i40e_aqc_opc_disable_switching_comp_ets: case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: case i40e_aqc_opc_configure_switching_comp_bw_config: cmd_param_flag = TRUE; break; case i40e_aqc_opc_query_vsi_bw_config: case i40e_aqc_opc_query_vsi_ets_sla_config: case i40e_aqc_opc_query_switching_comp_ets_config: case i40e_aqc_opc_query_port_ets_config: case i40e_aqc_opc_query_switching_comp_bw_config: cmd_param_flag = FALSE; break; default: return I40E_ERR_PARAM; } i40e_fill_default_direct_cmd_desc(&desc, opcode); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (cmd_param_flag) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->vsi_seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit * @hw: pointer to the hw struct * @seid: VSI seid * @credit: BW limit credits (0 = disabled) * @max_credit: Max BW limit credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_credit, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct 
i40e_aqc_configure_vsi_bw_limit *cmd = (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_vsi_bw_limit); cmd->vsi_seid = CPU_TO_LE16(seid); cmd->credit = CPU_TO_LE16(credit); cmd->max_credit = max_credit; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit * @hw: pointer to the hw struct * @seid: switching component seid * @credit: BW limit credits (0 = disabled) * @max_bw: Max BW limit credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_bw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_configure_switching_comp_bw_limit *cmd = (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_switching_comp_bw_limit); cmd->seid = CPU_TO_LE16(seid); cmd->credit = CPU_TO_LE16(credit); cmd->max_bw = max_bw; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC * @hw: pointer to the hw struct * @seid: VSI seid * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_ets_sla_bw_limit, cmd_details); } /** * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC * @hw: pointer to the hw struct * @seid: VSI seid * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_tc_bw, cmd_details); } /** * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_switching_comp_ets_bw_limit, cmd_details); } /** * i40e_aq_query_vsi_bw_config - Query VSI BW configuration * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_bw_config, cmd_details); 
} /** * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration per TC * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_ets_sla_config, cmd_details); } /** * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's per TC BW config * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_ets_config, cmd_details); } /** * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration * @hw: pointer to the hw struct * @seid: seid of the VSI or switching component connected to Physical Port * @bw_data: Buffer to hold current ETS configuration for the Physical Port * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_port_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_port_ets_config, cmd_details); } /** * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's BW configuration * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_bw_config, cmd_details); } /** * i40e_validate_filter_settings * @hw: pointer to the hardware structure * @settings: Filter control settings * * Check and validate the filter control settings passed. * The function checks for the valid filter/context sizes being * passed for FCoE and PE. * * Returns I40E_SUCCESS if the values passed are valid and within * range else returns an error. 
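 *
 * Example (illustrative sketch only; any of the enumerated sizes accepted
 * by the switch statements below would do, and the final FCoE sizes are
 * still subject to the PMFCOEFMAX check against the hardware register):
 *
 *	struct i40e_filter_control_settings s = {0};
 *
 *	s.fcoe_filt_num = I40E_HASH_FILTER_SIZE_4K;
 *	s.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_2K;
 *	s.pe_filt_num = I40E_HASH_FILTER_SIZE_16K;
 *	s.pe_cntx_num = I40E_DMA_CNTX_SIZE_4K;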
**/ static enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { u32 fcoe_cntx_size, fcoe_filt_size; u32 pe_cntx_size, pe_filt_size; u32 fcoe_fmax; u32 val; /* Validate FCoE settings passed */ switch (settings->fcoe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; fcoe_filt_size <<= (u32)settings->fcoe_filt_num; break; default: return I40E_ERR_PARAM; } switch (settings->fcoe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; break; default: return I40E_ERR_PARAM; } /* Validate PE settings passed */ switch (settings->pe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: case I40E_HASH_FILTER_SIZE_64K: case I40E_HASH_FILTER_SIZE_128K: case I40E_HASH_FILTER_SIZE_256K: case I40E_HASH_FILTER_SIZE_512K: case I40E_HASH_FILTER_SIZE_1M: pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; pe_filt_size <<= (u32)settings->pe_filt_num; break; default: return I40E_ERR_PARAM; } switch (settings->pe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: case I40E_DMA_CNTX_SIZE_8K: case I40E_DMA_CNTX_SIZE_16K: case I40E_DMA_CNTX_SIZE_32K: case I40E_DMA_CNTX_SIZE_64K: case I40E_DMA_CNTX_SIZE_128K: case I40E_DMA_CNTX_SIZE_256K: pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; pe_cntx_size <<= (u32)settings->pe_cntx_num; break; default: return I40E_ERR_PARAM; } /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ val = rd32(hw, I40E_GLHMC_FCOEFMAX); fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) return I40E_ERR_INVALID_SIZE; return I40E_SUCCESS; } /** * i40e_set_filter_control * @hw: pointer to the hardware structure * @settings: Filter control settings * * Set the Queue Filters for PE/FCoE and enable filters required * for a single PF. It is expected that these settings are programmed * at the driver initialization time. 
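 *
 * Example (illustrative sketch only; continues the hypothetical settings
 * shown above and enables the optional filter classes):
 *
 *	s.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	s.enable_fdir = TRUE;
 *	s.enable_ethtype = TRUE;
 *	s.enable_macvlan = TRUE;
 *	ret = i40e_set_filter_control(hw, &s);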
**/ enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { enum i40e_status_code ret = I40E_SUCCESS; u32 hash_lut_size = 0; u32 val; if (!settings) return I40E_ERR_PARAM; /* Validate the input settings */ ret = i40e_validate_filter_settings(hw, settings); if (ret) return ret; /* Read the PF Queue Filter control register */ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); /* Program required PE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & I40E_PFQF_CTL_0_PEHSIZE_MASK; /* Program required PE contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & I40E_PFQF_CTL_0_PEDSIZE_MASK; /* Program required FCoE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; val |= ((u32)settings->fcoe_filt_num << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCHSIZE_MASK; /* Program required FCoE DDP contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; val |= ((u32)settings->fcoe_cntx_num << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCDSIZE_MASK; /* Program Hash LUT size for the PF */ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) hash_lut_size = 1; val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ if (settings->enable_fdir) val |= I40E_PFQF_CTL_0_FD_ENA_MASK; if (settings->enable_ethtype) val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; if (settings->enable_macvlan) val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); return I40E_SUCCESS; } /** * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter * @hw: pointer to the hw struct * @mac_addr: MAC address to use in the filter * @ethtype: Ethertype to use in the filter * @flags: Flags that need to be applied to the filter * @vsi_seid: seid of the control VSI * @queue: VSI queue number to send the packet to * @is_add: Add control packet filter if TRUE, else remove it * @stats: Structure to hold information on control filter counts * @cmd_details: pointer to command details structure or NULL * * This command will add or remove a control packet filter for a control VSI. * In return it will update the total perfect filter count in * the stats member.
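 *
 * For a concrete in-tree usage, see
 * i40e_add_filter_to_drop_tx_flow_control_frames() below, which installs
 * a drop filter for ethertype 0x8808 with the IGNORE_MAC, DROP and TX
 * flags.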
**/ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u8 *mac_addr, u16 ethtype, u16 flags, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_control_packet_filter *cmd = (struct i40e_aqc_add_remove_control_packet_filter *) &desc.params.raw; struct i40e_aqc_add_remove_control_packet_filter_completion *resp = (struct i40e_aqc_add_remove_control_packet_filter_completion *) &desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; if (is_add) { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_control_packet_filter); cmd->queue = CPU_TO_LE16(queue); } else { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_control_packet_filter); } if (mac_addr) i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN, I40E_NONDMA_TO_NONDMA); cmd->etype = CPU_TO_LE16(ethtype); cmd->flags = CPU_TO_LE16(flags); cmd->seid = CPU_TO_LE16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stats) { stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used); stats->etype_used = LE16_TO_CPU(resp->etype_used); stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free); stats->etype_free = LE16_TO_CPU(resp->etype_free); } return status; } /** * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control * @hw: pointer to the hw struct * @seid: VSI seid to add ethertype filter from **/ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 seid) { #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; enum i40e_status_code status; status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, seid, 0, TRUE, NULL, NULL); if (status) DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); } /** * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue * @filters: list of cloud filters * @filter_count: length of list * * There's an issue in the device where the Geneve VNI layout needs * to be shifted 1 byte over from the VxLAN VNI **/ static void i40e_fix_up_geneve_vni( struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aqc_cloud_filters_element_data *f = filters; int i; for (i = 0; i < filter_count; i++) { u16 tnl_type; u32 ti; tnl_type = (LE16_TO_CPU(f[i].flags) & I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { ti = LE32_TO_CPU(f[i].tenant_id); f[i].tenant_id = CPU_TO_LE32(ti << 8); } } } /** * i40e_aq_add_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from * @filters: Buffer which contains the filters to be added * @filter_count: number of filters contained in the buffer * * Set the cloud filters for a given VSI. The contents of the * i40e_aqc_cloud_filters_element_data are filled * in by the caller of the function.
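 *
 * Example (illustrative sketch only; the tunnel type and VNI are
 * hypothetical and a real filter needs the remaining element fields
 * filled in as well):
 *
 *	struct i40e_aqc_cloud_filters_element_data f = {0};
 *
 *	f.flags = CPU_TO_LE16(I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE <<
 *			      I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
 *	f.tenant_id = CPU_TO_LE32(vni);
 *	status = i40e_aq_add_cloud_filters(hw, vsi_seid, &f, 1);
 *
 * The Geneve VNI fix-up below is applied to the buffer automatically
 * before the command is sent.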
* **/ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; enum i40e_status_code status; u16 buff_len; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); i40e_fix_up_geneve_vni(filters, filter_count); status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_add_cloud_filters_bb * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from * @filters: Buffer which contains the filters in big buffer to be added * @filter_count: number of filters contained in the buffer * * Set the cloud filters for a given VSI. The contents of the * i40e_aqc_cloud_filters_element_bb are filled in by the caller of * the function. * **/ enum i40e_status_code i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; enum i40e_status_code status; u16 buff_len; int i; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; for (i = 0; i < filter_count; i++) { u16 tnl_type; u32 ti; tnl_type = (LE16_TO_CPU(filters[i].element.flags) & I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; /* Due to hardware eccentricities, the VNI for Geneve is shifted * one more byte further than normally used for Tenant ID in * other tunnel types. */ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { ti = LE32_TO_CPU(filters[i].element.tenant_id); filters[i].element.tenant_id = CPU_TO_LE32(ti << 8); } } status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_rem_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to remove cloud filters from * @filters: Buffer which contains the filters to be removed * @filter_count: number of filters contained in the buffer * * Remove the cloud filters for a given VSI. The contents of the * i40e_aqc_cloud_filters_element_data are filled in by the caller * of the function.
* **/ enum i40e_status_code i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; enum i40e_status_code status; u16 buff_len; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); i40e_fix_up_geneve_vni(filters, filter_count); status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_rem_cloud_filters_bb * @hw: pointer to the hardware structure * @seid: VSI seid to remove cloud filters from * @filters: Buffer which contains the filters in big buffer to be removed * @filter_count: number of filters contained in the buffer * * Remove the big buffer cloud filters for a given VSI. The contents of the * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the * function. * **/ enum i40e_status_code i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; enum i40e_status_code status; u16 buff_len; int i; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; for (i = 0; i < filter_count; i++) { u16 tnl_type; u32 ti; tnl_type = (LE16_TO_CPU(filters[i].element.flags) & I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; /* Due to hardware eccentricities, the VNI for Geneve is shifted * one more byte further than normally used for Tenant ID in * other tunnel types. 
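 * For example, a Geneve tenant_id read back as 0x000012 is rewritten
 * as 0x001200 (ti << 8) before the buffer is sent to firmware.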
*/ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { ti = LE32_TO_CPU(filters[i].element.tenant_id); filters[i].element.tenant_id = CPU_TO_LE32(ti << 8); } } status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_replace_cloud_filters - Replace cloud filter command * @hw: pointer to the hw struct * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct * **/ enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw, struct i40e_aqc_replace_cloud_filters_cmd *filters, struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf) { struct i40e_aq_desc desc; struct i40e_aqc_replace_cloud_filters_cmd *cmd = (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw; enum i40e_status_code status = I40E_SUCCESS; int i = 0; /* X722 doesn't support this command */ if (hw->mac.type == I40E_MAC_X722) return I40E_ERR_DEVICE_NOT_SUPPORTED; /* need FW version greater than 6.00 */ if (hw->aq.fw_maj_ver < 6) return I40E_NOT_SUPPORTED; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_replace_cloud_filters); desc.datalen = CPU_TO_LE16(32); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->old_filter_type = filters->old_filter_type; cmd->new_filter_type = filters->new_filter_type; cmd->valid_flags = filters->valid_flags; cmd->tr_bit = filters->tr_bit; cmd->tr_bit2 = filters->tr_bit2; status = i40e_asq_send_command(hw, &desc, cmd_buf, sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL); /* for get cloud filters command */ for (i = 0; i < 32; i += 4) { cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i]; cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1]; cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2]; cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3]; } return status; } /** * i40e_aq_alternate_write * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read * @reg_val0: value to be written under 'reg_addr0' * @reg_addr1: address of second dword to be read * @reg_val1: value to be written under 'reg_addr1' * * Write one or two dwords to alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. * **/ enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write); cmd_resp->address0 = CPU_TO_LE32(reg_addr0); cmd_resp->address1 = CPU_TO_LE32(reg_addr1); cmd_resp->data0 = CPU_TO_LE32(reg_val0); cmd_resp->data1 = CPU_TO_LE32(reg_val1); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_alternate_write_indirect * @hw: pointer to the hardware structure * @addr: address of a first register to be modified * @dw_count: number of alternate structure fields to write * @buffer: pointer to the command buffer * * Write 'dw_count' dwords from 'buffer' to alternate structure * starting at 'addr'. 
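 *
 * Example (illustrative sketch only; the address and data are
 * hypothetical):
 *
 *	u32 words[2] = { 0x1, 0x2 };
 *
 *	status = i40e_aq_alternate_write_indirect(hw, alt_addr, 2, words);
 *
 * Transfers larger than I40E_AQ_LARGE_BUF/4 dwords additionally set the
 * LB flag, as the code below shows.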
* **/ enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_ind_write *cmd_resp = (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; enum i40e_status_code status; if (buffer == NULL) return I40E_ERR_PARAM; /* Indirect command */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write_indirect); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (dw_count > (I40E_AQ_LARGE_BUF/4)) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd_resp->address = CPU_TO_LE32(addr); cmd_resp->length = CPU_TO_LE32(dw_count); status = i40e_asq_send_command(hw, &desc, buffer, I40E_LO_DWORD(4*dw_count), NULL); return status; } /** * i40e_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read * @reg_val0: pointer for data read from 'reg_addr0' * @reg_addr1: address of second dword to be read * @reg_val1: pointer for data read from 'reg_addr1' * * Read one or two dwords from alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer * is not passed then only register at 'reg_addr0' is read. * **/ enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; enum i40e_status_code status; if (reg_val0 == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); cmd_resp->address0 = CPU_TO_LE32(reg_addr0); cmd_resp->address1 = CPU_TO_LE32(reg_addr1); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (status == I40E_SUCCESS) { *reg_val0 = LE32_TO_CPU(cmd_resp->data0); if (reg_val1 != NULL) *reg_val1 = LE32_TO_CPU(cmd_resp->data1); } return status; } /** * i40e_aq_alternate_read_indirect * @hw: pointer to the hardware structure * @addr: address of the alternate structure field * @dw_count: number of alternate structure fields to read * @buffer: pointer to the command buffer * * Read 'dw_count' dwords from alternate structure starting at 'addr' and * place them in 'buffer'. The buffer should be allocated by caller. * **/ enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_ind_write *cmd_resp = (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; enum i40e_status_code status; if (buffer == NULL) return I40E_ERR_PARAM; /* Indirect command */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read_indirect); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (dw_count > (I40E_AQ_LARGE_BUF/4)) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd_resp->address = CPU_TO_LE32(addr); cmd_resp->length = CPU_TO_LE32(dw_count); status = i40e_asq_send_command(hw, &desc, buffer, I40E_LO_DWORD(4*dw_count), NULL); return status; } /** * i40e_aq_alternate_clear * @hw: pointer to the HW structure. * * Clear the alternate structures of the port from which the function * is called. 
* **/ enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_clear_port); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_alternate_write_done * @hw: pointer to the HW structure. * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS * @reset_needed: indicates the SW should trigger GLOBAL reset * * Indicates to the FW that alternate structures have been changed. * **/ enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, u8 bios_mode, bool *reset_needed) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write_done *cmd = (struct i40e_aqc_alternate_write_done *)&desc.params.raw; enum i40e_status_code status; if (reset_needed == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write_done); cmd->cmd_flags = CPU_TO_LE16(bios_mode); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (!status && reset_needed) *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) & I40E_AQ_ALTERNATE_RESET_NEEDED) != 0); return status; } /** * i40e_aq_set_oem_mode * @hw: pointer to the HW structure. * @oem_mode: the OEM mode to be used * * Sets the device to a specific operating mode. Currently the only supported * mode is no_clp, which causes FW to refrain from using Alternate RAM. * **/ enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, u8 oem_mode) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write_done *cmd = (struct i40e_aqc_alternate_write_done *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_set_mode); cmd->cmd_flags = CPU_TO_LE16(oem_mode); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_resume_port_tx * @hw: pointer to the hardware structure * @cmd_details: pointer to command details structure or NULL * * Resume port's Tx traffic **/ enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_set_pci_config_data - store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status word from PCI config space * * Stores the PCI bus info (speed, width, type) within the i40e_hw structure **/ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) { hw->bus.type = i40e_bus_type_pci_express; switch (link_status & I40E_PCI_LINK_WIDTH) { case I40E_PCI_LINK_WIDTH_1: hw->bus.width = i40e_bus_width_pcie_x1; break; case I40E_PCI_LINK_WIDTH_2: hw->bus.width = i40e_bus_width_pcie_x2; break; case I40E_PCI_LINK_WIDTH_4: hw->bus.width = i40e_bus_width_pcie_x4; break; case I40E_PCI_LINK_WIDTH_8: hw->bus.width = i40e_bus_width_pcie_x8; break; default: hw->bus.width = i40e_bus_width_unknown; break; } switch (link_status & I40E_PCI_LINK_SPEED) { case I40E_PCI_LINK_SPEED_2500: hw->bus.speed = i40e_bus_speed_2500; break; case I40E_PCI_LINK_SPEED_5000: hw->bus.speed = i40e_bus_speed_5000; break; case I40E_PCI_LINK_SPEED_8000: hw->bus.speed = i40e_bus_speed_8000; break; default: hw->bus.speed = i40e_bus_speed_unknown; break; } } /** * i40e_aq_debug_dump * @hw: pointer to the hardware structure * @cluster_id: specific cluster to dump * @table_id: table 
id within cluster * @start_index: index of line in the block to read * @buff_size: dump buffer size * @buff: dump buffer * @ret_buff_size: actual buffer size returned * @ret_next_table: next block to read * @ret_next_index: next index to read * @cmd_details: pointer to command details structure or NULL * * Dump internal FW/HW data for debug purposes. * **/ enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, u8 table_id, u32 start_index, u16 buff_size, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_dump_internals *cmd = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; struct i40e_aqc_debug_dump_internals *resp = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_dump_internals); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->cluster_id = cluster_id; cmd->table_id = table_id; cmd->idx = CPU_TO_LE32(start_index); desc.datalen = CPU_TO_LE16(buff_size); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (ret_buff_size != NULL) *ret_buff_size = LE16_TO_CPU(desc.datalen); if (ret_next_table != NULL) *ret_next_table = resp->table_id; if (ret_next_index != NULL) *ret_next_index = LE32_TO_CPU(resp->idx); } return status; } + /** + * i40e_enable_eee + * @hw: pointer to the hardware structure + * @enable: state of Energy Efficient Ethernet mode to be set + * + * Enables or disables Energy Efficient Ethernet (EEE) mode + * according to the @enable parameter. + **/ +enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + enum i40e_status_code status; + __le16 eee_capability; + + /* Get initial PHY capabilities */ + status = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities, + NULL); + if (status) + goto err; + + /* Check whether NIC configuration is compatible with Energy Efficient + * Ethernet (EEE) mode.
+ */ + if (abilities.eee_capability == 0) { + status = I40E_ERR_CONFIG; + goto err; + } + + /* Cache initial EEE capability */ + eee_capability = abilities.eee_capability; + + /* Get current configuration */ + status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities, + NULL); + if (status) + goto err; + + /* Cache current configuration */ + config.phy_type = abilities.phy_type; + config.phy_type_ext = abilities.phy_type_ext; + config.link_speed = abilities.link_speed; + config.abilities = abilities.abilities | + I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + /* Set desired EEE state */ + if (enable) { + config.eee_capability = eee_capability; + config.eeer |= I40E_PRTPM_EEER_TX_LPI_EN_MASK; + } else { + config.eee_capability = 0; + config.eeer &= ~I40E_PRTPM_EEER_TX_LPI_EN_MASK; + } + + /* Save modified config */ + status = i40e_aq_set_phy_config(hw, &config, NULL); +err: + return status; +} + +/** * i40e_read_bw_from_alt_ram * @hw: pointer to the hardware structure * @max_bw: pointer for max_bw read * @min_bw: pointer for min_bw read * @min_valid: pointer for bool that is TRUE if min_bw is a valid value * @max_valid: pointer for bool that is TRUE if max_bw is a valid value * * Read bw from the alternate ram for the given pf **/ enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid) { enum i40e_status_code status; u32 max_bw_addr, min_bw_addr; /* Calculate the address of the min/max bw registers */ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + I40E_ALT_STRUCT_MAX_BW_OFFSET + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + I40E_ALT_STRUCT_MIN_BW_OFFSET + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); /* Read the bandwidths from alt ram */ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, min_bw_addr, min_bw); if (*min_bw & I40E_ALT_BW_VALID_MASK) *min_valid = TRUE; else *min_valid = FALSE; if (*max_bw & I40E_ALT_BW_VALID_MASK) *max_valid = TRUE; else *max_valid = FALSE; return status; } /** * i40e_aq_configure_partition_bw * @hw: pointer to the hardware structure * @bw_data: Buffer holding valid pfs and bw limits * @cmd_details: pointer to command details * * Configure partitions guaranteed/max bw **/ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { enum i40e_status_code status; struct i40e_aq_desc desc; u16 bwd_size = sizeof(*bw_data); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_partition_bw); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); desc.datalen = CPU_TO_LE16(bwd_size); status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details); return status; } /** * i40e_read_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 *value) { enum i40e_status_code status = I40E_ERR_TIMEOUT; u8 port_num = (u8)hw->func_caps.mdio_port_num; u32 command = 0; u16 retry = 1000; command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << 
I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | (I40E_MDIO_CLAUSE22_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); } else { command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; } return status; } /** * i40e_write_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes specified PHY register value **/ enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 value) { enum i40e_status_code status = I40E_ERR_TIMEOUT; u8 port_num = (u8)hw->func_caps.mdio_port_num; u32 command = 0; u16 retry = 1000; command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; wr32(hw, I40E_GLGEN_MSRWD(port_num), command); command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | (I40E_MDIO_CLAUSE22_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); return status; } /** * i40e_read_phy_register_clause45 * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value) { enum i40e_status_code status = I40E_ERR_TIMEOUT; u32 command = 0; u16 retry = 1000; u8 port_num = (u8)hw->func_caps.mdio_port_num; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); goto phy_read_end; } command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); status = I40E_ERR_TIMEOUT; retry = 1000; wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); if (!status) { command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; } else { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't read register value from external PHY.\n"); } phy_read_end: return status; } /** * 
i40e_write_phy_register_clause45 * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register **/ enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value) { enum i40e_status_code status = I40E_ERR_TIMEOUT; u32 command = 0; u16 retry = 1000; u8 port_num = (u8)hw->func_caps.mdio_port_num; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); goto phy_write_end; } command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; wr32(hw, I40E_GLGEN_MSRWD(port_num), command); command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); status = I40E_ERR_TIMEOUT; retry = 1000; wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); phy_write_end: return status; } /** * i40e_write_phy_register * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register **/ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value) { enum i40e_status_code status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: status = i40e_write_phy_register_clause22(hw, reg, phy_addr, value); break; case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: status = i40e_write_phy_register_clause45(hw, page, reg, phy_addr, value); break; default: status = I40E_ERR_UNKNOWN_PHY; break; } return status; } /** * i40e_read_phy_register * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value) { enum i40e_status_code status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: status = i40e_read_phy_register_clause22(hw, reg, phy_addr, value); break; case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: status = i40e_read_phy_register_clause45(hw, page, reg, phy_addr, value); break; default: status = I40E_ERR_UNKNOWN_PHY; break; } return status; } /** * 
i40e_get_phy_address * @hw: pointer to the HW structure * @dev_num: PHY port number whose address we want * * Gets PHY address for current port **/ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) { u8 port_num = (u8)hw->func_caps.mdio_port_num; u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; } /** * i40e_blink_phy_link_led * @hw: pointer to the HW structure * @time: how long the LED will blink, in seconds * @interval: gap between LED on and off in msecs * * Blinks PHY link LED **/ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval) { enum i40e_status_code status = I40E_SUCCESS; u32 i; u16 led_ctl = 0; u16 gpio_led_port; u16 led_reg; u16 led_addr = I40E_PHY_LED_PROV_REG_1; u8 phy_addr = 0; u8 port_num; i = rd32(hw, I40E_PFGEN_PORTNUM); port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); phy_addr = i40e_get_phy_address(hw, port_num); for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, led_addr++) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, &led_reg); if (status) goto phy_blinking_end; led_ctl = led_reg; if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { led_reg = 0; status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_reg); if (status) goto phy_blinking_end; break; } } if (time > 0 && interval > 0) { for (i = 0; i < time * 1000; i += interval) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, &led_reg); if (status) goto restore_config; if (led_reg & I40E_PHY_LED_MANUAL_ON) led_reg = 0; else led_reg = I40E_PHY_LED_MANUAL_ON; status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_reg); if (status) goto restore_config; i40e_msec_delay(interval); } } restore_config: status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_ctl); phy_blinking_end: return status; } /** * i40e_led_get_reg - read LED register * @hw: pointer to the HW structure * @led_addr: LED register address * @reg_val: read register value **/ enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, u32 *reg_val) { enum i40e_status_code status; u8 phy_addr = 0; *reg_val = 0; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, I40E_PHY_COM_REG_PAGE, TRUE, I40E_PHY_LED_PROV_REG_1, reg_val, NULL); } else { phy_addr = i40e_get_phy_address(hw, hw->port); status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, (u16 *)reg_val); } return status; } /** * i40e_led_set_reg - write LED register * @hw: pointer to the HW structure * @led_addr: LED register address * @reg_val: register value to write **/ enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, u32 reg_val) { enum i40e_status_code status; u8 phy_addr = 0; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, I40E_PHY_COM_REG_PAGE, TRUE, I40E_PHY_LED_PROV_REG_1, reg_val, NULL); } else { phy_addr = i40e_get_phy_address(hw, hw->port); status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, (u16)reg_val); } return status; } /** * i40e_led_get_phy - return current on/off mode * @hw: pointer to the hw struct * @led_addr: address of led register to use * @val: original value of register to use * **/ enum i40e_status_code i40e_led_get_phy(struct i40e_hw
*hw, u16 *led_addr, u16 *val) { enum i40e_status_code status = I40E_SUCCESS; u16 gpio_led_port; u32 reg_val_aq; u16 temp_addr; u8 phy_addr = 0; u16 reg_val; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, I40E_PHY_COM_REG_PAGE, TRUE, I40E_PHY_LED_PROV_REG_1, &reg_val_aq, NULL); if (status == I40E_SUCCESS) *val = (u16)reg_val_aq; return status; } temp_addr = I40E_PHY_LED_PROV_REG_1; phy_addr = i40e_get_phy_address(hw, hw->port); for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, temp_addr++) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, temp_addr, phy_addr, &reg_val); if (status) return status; *val = reg_val; if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { *led_addr = temp_addr; break; } } return status; } /** * i40e_led_set_phy * @hw: pointer to the HW structure * @on: TRUE to turn the LED on, FALSE to turn it off * @led_addr: address of led register to use * @mode: original register value, with I40E_PHY_LED_MODE_ORIG set to restore it * * Set LEDs on or off when controlled by the PHY * **/ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on, u16 led_addr, u32 mode) { enum i40e_status_code status = I40E_SUCCESS; u32 led_ctl = 0; u32 led_reg = 0; status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) return status; led_ctl = led_reg; if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { led_reg = 0; status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) return status; } status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) goto restore_config; if (on) led_reg = I40E_PHY_LED_MANUAL_ON; else led_reg = 0; status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) goto restore_config; if (mode & I40E_PHY_LED_MODE_ORIG) { led_ctl = (mode & I40E_PHY_LED_MODE_MASK); status = i40e_led_set_reg(hw, led_addr, led_ctl); } return status; restore_config: status = i40e_led_set_reg(hw, led_addr, led_ctl); return status; } /** + * i40e_get_phy_lpi_status - read LPI status from PHY or MAC register + * @hw: pointer to the hw struct + * @stat: pointer to structure with status of rx and tx lpi + * + * Read LPI state directly from external PHY register or from MAC + * register, depending on device ID and current link speed.
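+ * + * For I40E_DEV_ID_10G_BASE_T_BC and I40E_DEV_ID_5G_BASE_T_BC devices at + * 2.5G or 5G link speed the bits are read from the external Broadcom PHY + * PCS status register; in every other case they come from the + * I40E_PRTPM_EEE_STAT MAC register.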
+ */ +enum i40e_status_code i40e_get_phy_lpi_status(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat) +{ + enum i40e_status_code ret = I40E_SUCCESS; + u32 val; + + stat->rx_lpi_status = 0; + stat->tx_lpi_status = 0; + + if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC || + hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) && + (hw->phy.link_info.link_speed == I40E_LINK_SPEED_2_5GB || + hw->phy.link_info.link_speed == I40E_LINK_SPEED_5GB)) { + ret = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_BCM_PHY_PCS_STATUS1_PAGE, + TRUE, + I40E_BCM_PHY_PCS_STATUS1_REG, + &val, NULL); + + if (ret != I40E_SUCCESS) + return ret; + + stat->rx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_RX_LPI); + stat->tx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_TX_LPI); + + return ret; + } + + val = rd32(hw, I40E_PRTPM_EEE_STAT); + stat->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + stat->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + + return ret; +} + +/** + * i40e_get_lpi_counters - read LPI counters from EEE statistics + * @hw: pointer to the hw struct + * @tx_counter: pointer to memory for TX LPI counter + * @rx_counter: pointer to memory for RX LPI counter + * @is_clear: returns TRUE if counters are clear after read + * + * Read Low Power Idle (LPI) mode counters from Energy Efficient + * Ethernet (EEE) statistics. + **/ +enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw, + u32 *tx_counter, u32 *rx_counter, + bool *is_clear) +{ + /* only X710-T*L requires special handling of counters + * for other devices we just read the MAC registers + */ + if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC || + hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) && + hw->phy.link_info.link_speed != I40E_LINK_SPEED_1GB) { + enum i40e_status_code retval; + u32 cmd_status; + + *is_clear = FALSE; + retval = i40e_aq_run_phy_activity(hw, + I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT, + &cmd_status, tx_counter, rx_counter, NULL); + + if (!retval && cmd_status != I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + retval = I40E_ERR_ADMIN_QUEUE_ERROR; + + return retval; + } + + *is_clear = TRUE; + *tx_counter = rd32(hw, I40E_PRTPM_TLPIC); + *rx_counter = rd32(hw, I40E_PRTPM_RLPIC); + + return I40E_SUCCESS; +} + +/** + * i40e_get_lpi_duration - read LPI time duration from EEE statistics + * @hw: pointer to the hw struct + * @stat: pointer to structure with status of rx and tx lpi + * @tx_duration: pointer to memory for TX LPI time duration + * @rx_duration: pointer to memory for RX LPI time duration + * + * Read Low Power Idle (LPI) mode time duration from Energy Efficient + * Ethernet (EEE) statistics. 
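+ * + * Only the X710-T*L devices (I40E_DEV_ID_10G_BASE_T_BC and + * I40E_DEV_ID_5G_BASE_T_BC) implement this query; all other devices get + * I40E_ERR_NOT_IMPLEMENTED. A minimal call sketch (the stats argument is + * whatever i40e_hw_port_stats instance the caller maintains): + * + *	u64 tx_dur, rx_dur; + *	status = i40e_get_lpi_duration(hw, &stats, &tx_dur, &rx_dur);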
+ */ +enum i40e_status_code i40e_get_lpi_duration(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat, + u64 *tx_duration, u64 *rx_duration) +{ + u32 tx_time_dur, rx_time_dur; + enum i40e_status_code retval; + u32 cmd_status; + + if (hw->device_id != I40E_DEV_ID_10G_BASE_T_BC && + hw->device_id != I40E_DEV_ID_5G_BASE_T_BC) + return I40E_ERR_NOT_IMPLEMENTED; + + retval = i40e_aq_run_phy_activity + (hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR, + &cmd_status, &tx_time_dur, &rx_time_dur, NULL); + + if (retval) + return retval; + if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) != + I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + return I40E_ERR_ADMIN_QUEUE_ERROR; + + if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB && + !tx_time_dur && !rx_time_dur && + stat->tx_lpi_status && stat->rx_lpi_status) { + retval = i40e_aq_run_phy_activity + (hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR, + &cmd_status, + &tx_time_dur, &rx_time_dur, NULL); + + if (retval) + return retval; + if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) != + I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + return I40E_ERR_ADMIN_QUEUE_ERROR; + tx_time_dur = 0; + rx_time_dur = 0; + } + + *tx_duration = tx_time_dur; + *rx_duration = rx_time_dur; + + return retval; +} + +/** + * i40e_lpi_stat_update - update LPI counters with values relative to offset + * @hw: pointer to the hw struct + * @offset_loaded: TRUE if the offsets already hold baseline values; if FALSE, + * the current counter values are captured as the new offsets + * @tx_offset: pointer to offset of TX LPI counter + * @tx_stat: pointer to value of TX LPI counter + * @rx_offset: pointer to offset of RX LPI counter + * @rx_stat: pointer to value of RX LPI counter + * + * Update Low Power Idle (LPI) mode counters, taking the passed offsets + * into account. + **/ +enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw, + bool offset_loaded, u64 *tx_offset, + u64 *tx_stat, u64 *rx_offset, + u64 *rx_stat) +{ + enum i40e_status_code retval; + u32 tx_counter, rx_counter; + bool is_clear; + + retval = i40e_get_lpi_counters(hw, &tx_counter, &rx_counter, &is_clear); + if (retval) + goto err; + + if (is_clear) { + *tx_stat += tx_counter; + *rx_stat += rx_counter; + } else { + if (!offset_loaded) { + *tx_offset = tx_counter; + *rx_offset = rx_counter; + } + + *tx_stat = (tx_counter >= *tx_offset) ? + (u32)(tx_counter - *tx_offset) : + (u32)((tx_counter + BIT_ULL(32)) - *tx_offset); + *rx_stat = (rx_counter >= *rx_offset) ?
+ (u32)(rx_counter - *rx_offset) : + (u32)((rx_counter + BIT_ULL(32)) - *rx_offset); + } +err: + return retval; +} + +/** * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: ptr to register value * @cmd_details: pointer to command details structure or NULL * * Use the firmware to read the Rx control register, * especially useful if the Rx unit is under heavy pressure **/ enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; enum i40e_status_code status; if (reg_val == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); cmd_resp->address = CPU_TO_LE32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) *reg_val = LE32_TO_CPU(cmd_resp->value); return status; } /** * i40e_read_rx_ctl - read from an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address **/ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) { enum i40e_status_code status = I40E_SUCCESS; bool use_register; int retry = 5; u32 val = 0; use_register = (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5)) || (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { i40e_msec_delay(1); retry--; goto do_retry; } } /* if the AQ access failed, try the old-fashioned way */ if (status || use_register) val = rd32(hw, reg_addr); return val; } /** * i40e_aq_rx_ctl_write_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Use the firmware to write to an Rx control register, * especially useful if the Rx unit is under heavy pressure **/ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_rx_ctl_reg_read_write *cmd = (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); cmd->address = CPU_TO_LE32(reg_addr); cmd->value = CPU_TO_LE32(reg_val); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_write_rx_ctl - write to an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value **/ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) { enum i40e_status_code status = I40E_SUCCESS; bool use_register; int retry = 5; use_register = (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5)) || (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40e_aq_rx_ctl_write_register(hw, reg_addr, reg_val, NULL); if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { i40e_msec_delay(1); retry--; goto do_retry; } } /* if the AQ access failed, try the old-fashioned way */ if (status || use_register) wr32(hw, reg_addr, reg_val); } /** * i40e_mdio_if_number_selection - MDIO I/F number selection * @hw: pointer to the hw struct * @set_mdio: use MDIO I/F number specified by mdio_num * @mdio_num: MDIO I/F number * 
@cmd: pointer to PHY Register command structure **/ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num, struct i40e_aqc_phy_register_access *cmd) { if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) cmd->cmd_flags |= I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | ((mdio_num << I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); else i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n"); } } /** * i40e_aq_set_phy_register_ext * @hw: pointer to the hw struct * @phy_select: select which phy should be accessed * @dev_addr: PHY device address * @page_change: enable auto page change * @set_mdio: use MDIO I/F number specified by mdio_num * @mdio_num: MDIO I/F number * @reg_addr: PHY register address * @reg_val: new register value * @cmd_details: pointer to command details structure or NULL * * Write the external PHY register. * NOTE: In common cases the MDIO I/F number should not be changed; that's why * you may use the simple wrapper i40e_aq_set_phy_register. **/ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw, u8 phy_select, u8 dev_addr, bool page_change, bool set_mdio, u8 mdio_num, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = (struct i40e_aqc_phy_register_access *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_register); cmd->phy_interface = phy_select; cmd->dev_addres = dev_addr; cmd->reg_address = CPU_TO_LE32(reg_addr); cmd->reg_value = CPU_TO_LE32(reg_val); if (!page_change) cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_phy_register_ext * @hw: pointer to the hw struct * @phy_select: select which phy should be accessed * @dev_addr: PHY device address * @page_change: enable auto page change * @set_mdio: use MDIO I/F number specified by mdio_num * @mdio_num: MDIO I/F number * @reg_addr: PHY register address * @reg_val: read register value * @cmd_details: pointer to command details structure or NULL * * Read the external PHY register. * NOTE: In common cases the MDIO I/F number should not be changed; that's why * you may use the simple wrapper i40e_aq_get_phy_register.
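* * A minimal read through the wrapper, mirroring the call made in * i40e_led_get_reg() above (illustrative sketch only): * *	u32 val; *	status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, *			I40E_PHY_COM_REG_PAGE, TRUE, *			I40E_PHY_LED_PROV_REG_1, &val, NULL);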
**/ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw, u8 phy_select, u8 dev_addr, bool page_change, bool set_mdio, u8 mdio_num, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = (struct i40e_aqc_phy_register_access *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_register); cmd->phy_interface = phy_select; cmd->dev_addres = dev_addr; cmd->reg_address = CPU_TO_LE32(reg_addr); if (!page_change) cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) *reg_val = LE32_TO_CPU(cmd->reg_value); return status; } + +/** + * i40e_aq_run_phy_activity + * @hw: pointer to the hw struct + * @activity_id: ID of DNL activity to run + * @dnl_opcode: opcode passed to DNL script + * @cmd_status: pointer to memory to write return value of DNL script + * @data0: pointer to memory for first 4 bytes of data returned by DNL script + * @data1: pointer to memory for last 4 bytes of data returned by DNL script + * @cmd_details: pointer to command details structure or NULL + * + * Run DNL admin command. + **/ +enum i40e_status_code +i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 dnl_opcode, + u32 *cmd_status, u32 *data0, u32 *data1, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_run_phy_activity *cmd; + enum i40e_status_code retval; + struct i40e_aq_desc desc; + + cmd = (struct i40e_aqc_run_phy_activity *)&desc.params.raw; + + if (!cmd_status || !data0 || !data1) { + retval = I40E_ERR_PARAM; + goto err; + } + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_run_phy_activity); + + cmd->activity_id = CPU_TO_LE16(activity_id); + cmd->params.cmd.dnl_opcode = CPU_TO_LE32(dnl_opcode); + + retval = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (retval) + goto err; + + *cmd_status = LE32_TO_CPU(cmd->params.resp.cmd_status); + *data0 = LE32_TO_CPU(cmd->params.resp.data0); + *data1 = LE32_TO_CPU(cmd->params.resp.data1); +err: + return retval; +} + /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details * * Send message to PF driver using admin queue. By default, this message * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for * completion before returning. 
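* When cmd_details is NULL, a local details structure with async set to TRUE * is supplied, so the call does not wait for the descriptor to complete.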
**/ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, enum virtchnl_ops v_opcode, enum i40e_status_code v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_asq_cmd_details details; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); desc.cookie_high = CPU_TO_LE32(v_opcode); desc.cookie_low = CPU_TO_LE32(v_retval); if (msglen) { desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(msglen); } if (!cmd_details) { i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM); details.async = TRUE; cmd_details = &details; } status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, msglen, cmd_details); return status; } /** * i40e_vf_parse_hw_config * @hw: pointer to the hardware structure * @msg: pointer to the virtual channel VF resource structure * * Given a VF resource message from the PF, populate the hw struct * with appropriate information. **/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, struct virtchnl_vf_resource *msg) { struct virtchnl_vsi_resource *vsi_res; int i; vsi_res = &msg->vsi_res[0]; hw->dev_caps.num_vsis = msg->num_vsis; hw->dev_caps.num_rx_qp = msg->num_queue_pairs; hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; hw->dev_caps.dcb = msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_L2; hw->dev_caps.iwarp = (msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; for (i = 0; i < msg->num_vsis; i++) { if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { i40e_memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr, ETH_ALEN, I40E_NONDMA_TO_NONDMA); i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr, ETH_ALEN, I40E_NONDMA_TO_NONDMA); } vsi_res++; } } /** * i40e_vf_reset * @hw: pointer to the hardware structure * * Send a VF_RESET message to the PF. Does not wait for response from PF * as none will be forthcoming. Immediately after calling this function, * the admin queue should be shut down and (optionally) reinitialized. 
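* The request itself is just a VIRTCHNL_OP_RESET_VF message with no payload, * sent through i40e_aq_send_msg_to_pf().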
**/ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw) { return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, I40E_SUCCESS, NULL, 0, NULL); } /** * i40e_aq_set_arp_proxy_config * @hw: pointer to the HW structure * @proxy_config: pointer to proxy config command table struct * @cmd_details: pointer to command details * * Set ARP offload parameters from pre-populated * i40e_aqc_arp_proxy_data struct **/ enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, struct i40e_aqc_arp_proxy_data *proxy_config, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; if (!proxy_config) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); desc.params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config)); desc.params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config)); desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data)); status = i40e_asq_send_command(hw, &desc, proxy_config, sizeof(struct i40e_aqc_arp_proxy_data), cmd_details); return status; } /** * i40e_aq_opc_set_ns_proxy_table_entry * @hw: pointer to the HW structure * @ns_proxy_table_entry: pointer to NS table entry command struct * @cmd_details: pointer to command details * * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters * from pre-populated i40e_aqc_ns_proxy_data struct **/ enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; if (!ns_proxy_table_entry) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_ns_proxy_table_entry); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); desc.params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry)); desc.params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry)); desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data)); status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry, sizeof(struct i40e_aqc_ns_proxy_data), cmd_details); return status; } /** * i40e_aq_set_clear_wol_filter * @hw: pointer to the hw struct * @filter_index: index of filter to modify (0-7) * @filter: buffer containing filter to be set * @set_filter: TRUE to set filter, FALSE to clear filter * @no_wol_tco: if TRUE, pass through packets cannot cause wake-up * if FALSE, pass through packets may cause wake-up * @filter_valid: TRUE if filter action is valid * @no_wol_tco_valid: TRUE if no WoL in TCO traffic action valid * @cmd_details: pointer to command details structure or NULL * * Set or clear WoL filter for port attached to the PF **/ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, u8 filter_index, struct i40e_aqc_set_wol_filter_data *filter, bool set_filter, bool no_wol_tco, bool filter_valid, bool no_wol_tco_valid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_wol_filter *cmd = (struct i40e_aqc_set_wol_filter *)&desc.params.raw; enum i40e_status_code status; u16 cmd_flags = 0; u16 valid_flags = 0; u16 buff_len = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter); if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS) return I40E_ERR_PARAM; cmd->filter_index = 
CPU_TO_LE16(filter_index); if (set_filter) { if (!filter) return I40E_ERR_PARAM; cmd_flags |= I40E_AQC_SET_WOL_FILTER; cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR; } if (no_wol_tco) cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL; cmd->cmd_flags = CPU_TO_LE16(cmd_flags); if (filter_valid) valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID; if (no_wol_tco_valid) valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID; cmd->valid_flags = CPU_TO_LE16(valid_flags); buff_len = sizeof(*filter); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter)); cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter)); status = i40e_asq_send_command(hw, &desc, filter, buff_len, cmd_details); return status; } /** * i40e_aq_get_wake_event_reason * @hw: pointer to the hw struct * @wake_reason: return value, index of matching filter * @cmd_details: pointer to command details structure or NULL * * Get information for the reason of a Wake Up event **/ enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw, u16 *wake_reason, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_wake_reason_completion *resp = (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) *wake_reason = LE16_TO_CPU(resp->wake_reason); return status; } /** * i40e_aq_clear_all_wol_filters * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Clear all WoL filters for the port attached to the PF **/ enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_clear_all_wol_filters); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } Index: head/sys/dev/ixl/i40e_devids.h =================================================================== --- head/sys/dev/ixl/i40e_devids.h (revision 365230) +++ head/sys/dev/ixl/i40e_devids.h (revision 365231) @@ -1,75 +1,82 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_DEVIDS_H_ #define _I40E_DEVIDS_H_ /* Vendor ID */ #define I40E_INTEL_VENDOR_ID 0x8086 /* Device IDs */ #define I40E_DEV_ID_X710_N3000 0x0CF8 #define I40E_DEV_ID_XXV710_N3000 0x0D58 #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 #define I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_20G_KR2_A 0x1588 #define I40E_DEV_ID_10G_BASE_T4 0x1589 #define I40E_DEV_ID_25G_B 0x158A #define I40E_DEV_ID_25G_SFP28 0x158B +#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF +#define I40E_DEV_ID_10G_B 0x104F +#define I40E_DEV_ID_10G_SFP 0x104E +#define I40E_DEV_ID_5G_BASE_T_BC 0x101F +#define I40E_IS_X710TL_DEVICE(d) \ + (((d) == I40E_DEV_ID_10G_BASE_T_BC) || \ + ((d) == I40E_DEV_ID_5G_BASE_T_BC)) #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 #define I40E_DEV_ID_ADAPTIVE_VF 0x1889 #define I40E_DEV_ID_KX_X722 0x37CE #define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 #define I40E_DEV_ID_X722_VF 0x37CD #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ (d) == I40E_DEV_ID_QSFP_C) #define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \ (d) == I40E_DEV_ID_25G_SFP28) #endif /* _I40E_DEVIDS_H_ */ Index: head/sys/dev/ixl/i40e_prototype.h =================================================================== --- head/sys/dev/ixl/i40e_prototype.h (revision 365230) +++ head/sys/dev/ixl/i40e_prototype.h (revision 365231) @@ -1,609 +1,630 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ #include "i40e_type.h" #include "i40e_alloc.h" #include "virtchnl.h" /* Prototypes for shared code functions that are not in * the standard function pointer structures. These are * mostly because they are needed even before the init * has happened and will assist in the early SW and FW * setup. */ /* adminq functions */ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw); enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw); enum i40e_status_code i40e_init_asq(struct i40e_hw *hw); enum i40e_status_code i40e_init_arq(struct i40e_hw *hw); enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw); enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw); enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw); enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw); u16 i40e_clean_asq(struct i40e_hw *hw); void i40e_free_adminq_asq(struct i40e_hw *hw); void i40e_free_adminq_arq(struct i40e_hw *hw); enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr); void i40e_adminq_init_ring_data(struct i40e_hw *hw); enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *events_pending); enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details); bool i40e_asq_done(struct i40e_hw *hw); /* debug function for adminq */ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len); void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw, u16 seid, struct i40e_aqc_get_set_rss_key_data *key); enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw, u16 seid, struct i40e_aqc_get_set_rss_key_data *key); const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err); u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on, u16 led_addr, u32 mode); enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, u16 *val); enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval); enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, u32 *reg_val); enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, u32 reg_val); +enum i40e_status_code 
i40e_get_phy_lpi_status(struct i40e_hw *hw, + struct i40e_hw_port_stats *stats); +enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw, u32 *tx_counter, + u32 *rx_counter, bool *is_clear); +enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw, + bool offset_loaded, u64 *tx_offset, + u64 *tx_stat, u64 *rx_offset, + u64 *rx_stat); +enum i40e_status_code i40e_get_lpi_duration(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat, + u64 *tx_duration, u64 *rx_duration); /* admin send queue commands */ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, u32 reg_addr, u64 reg_val, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_aq_set_phy_config *config, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset); enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size, bool crc_en, u16 pacing, bool auto_drop_blocking_packets, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, bool enable_link, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 vsi_id, bool set_filter, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details, bool rx_only_promisc); enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, u16 seid, bool enable, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, bool default_port, u16 *pveb_seid, bool enable_stats, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, u16 *statistic_index, u16 *vebs_used, u16 *vebs_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rule_id, u16 *rules_used, u16 *rules_free); enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rules_used, u16 *rules_free); enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, u16 flags, u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, enum i40e_aq_resource_access_type access, u8 sdp_number, u64 *timeout, struct i40e_asq_cmd_details *cmd_details); 
enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, u8 sdp_number, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, bool last_command, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, u8 cmd_flags, u32 field_id, void *data, u16 buf_size, u16 *element_count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, u8 cmd_flags, void *data, u16 buf_size, u16 element_count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, void *buff, u16 buff_size, u16 *data_size, enum i40e_admin_queue_opc list_type_opc, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, u8 mib_type, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, bool persist, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, bool start_agent, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, u8 *num_entries, struct i40e_aqc_switch_resource_alloc_element_resp *buf, u16 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, u16 mac_seid, u16 vsi_seid, u16 *ret_seid); enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, u16 vsi_seid, u16 tag, u16 queue_num, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details 
*cmd_details); enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, u16 tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid, u16 etag, u8 num_tags_in_buf, void *buf, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid, u16 etag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, u16 old_tag, u16 new_tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 *stat_index, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 stat_index, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, u16 flags, u8 *mac_addr, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_credit, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_bw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_port_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw 
*hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count); enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count); enum i40e_status_code i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count); enum i40e_status_code i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count); enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg); enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw, struct i40e_aqc_replace_cloud_filters_cmd *filters, struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf); enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1); enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer); enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1); enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer); enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw); enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, u8 bios_mode, bool *reset_needed); enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, u8 oem_mode); /* i40e_common */ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw); enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw); void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up); enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw); enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid); enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw); /* prototype for functions used for NVM access */ enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw); enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access); void i40e_release_nvm(struct i40e_hw *hw); enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset, u16 data_offset, u16 words_data_size, u16 *data_ptr); enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data); enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module, u32 offset, u16 words, void *data, bool last_command); enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); enum i40e_status_code 
__i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data); enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset, void *data); enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module, u32 offset, u16 words, void *data); enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum); enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw); enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *); void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, struct i40e_aq_desc *desc); void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); +enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable); enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw); extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) { return i40e_ptype_lookup[ptype]; } /** * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition * @link_speed: the speed to convert * * Returns the link_speed in terms of the virtchnl interface, for use in * converting link_speed as reported by the AdminQ into the format used for * talking to virtchnl devices. If we can't represent the link speed properly, * report LINK_SPEED_UNKNOWN. **/ static INLINE enum virtchnl_link_speed i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) { switch (link_speed) { case I40E_LINK_SPEED_100MB: return VIRTCHNL_LINK_SPEED_100MB; case I40E_LINK_SPEED_1GB: return VIRTCHNL_LINK_SPEED_1GB; + case I40E_LINK_SPEED_2_5GB: + return VIRTCHNL_LINK_SPEED_2_5GB; + case I40E_LINK_SPEED_5GB: + return VIRTCHNL_LINK_SPEED_5GB; case I40E_LINK_SPEED_10GB: return VIRTCHNL_LINK_SPEED_10GB; case I40E_LINK_SPEED_40GB: return VIRTCHNL_LINK_SPEED_40GB; case I40E_LINK_SPEED_20GB: return VIRTCHNL_LINK_SPEED_20GB; case I40E_LINK_SPEED_25GB: return VIRTCHNL_LINK_SPEED_25GB; case I40E_LINK_SPEED_UNKNOWN: default: return VIRTCHNL_LINK_SPEED_UNKNOWN; } } /* prototype for functions used for SW spinlocks */ void i40e_init_spinlock(struct i40e_spinlock *sp); void i40e_acquire_spinlock(struct i40e_spinlock *sp); void i40e_release_spinlock(struct i40e_spinlock *sp); void i40e_destroy_spinlock(struct i40e_spinlock *sp); /* i40e_common for VF drivers*/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, struct virtchnl_vf_resource *msg); enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw); enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, enum virtchnl_ops v_opcode, enum i40e_status_code v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings); enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u8 *mac_addr, u16 ethtype, u16 flags, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, u8 table_id, u32 start_index, u16 buff_size, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); enum i40e_status_code 
i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details); u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details); void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw, u8 phy_select, u8 dev_addr, bool page_change, bool set_mdio, u8 mdio_num, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw, u8 phy_select, u8 dev_addr, bool page_change, bool set_mdio, u8 mdio_num, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details); /* Convenience wrappers for most common use case */ #define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \ i40e_aq_set_phy_register_ext(hw, ps, da, pc, FALSE, 0, ra, rv, cd) #define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \ i40e_aq_get_phy_register_ext(hw, ps, da, pc, FALSE, 0, ra, rv, cd) + +enum i40e_status_code +i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 opcode, + u32 *cmd_status, u32 *data0, u32 *data1, + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, struct i40e_aqc_arp_proxy_data *proxy_config, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, u8 filter_index, struct i40e_aqc_set_wol_filter_data *filter, bool set_filter, bool no_wol_tco, bool filter_valid, bool no_wol_tco_valid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw, u16 *wake_reason, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 *value); enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 value); enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value); enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value); u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); #endif /* _I40E_PROTOTYPE_H_ */ Index: head/sys/dev/ixl/i40e_type.h =================================================================== --- head/sys/dev/ixl/i40e_type.h (revision 365230) +++ head/sys/dev/ixl/i40e_type.h (revision 365231) @@ -1,1731 +1,1745 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ #include "i40e_status.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" #include "i40e_devids.h" #define BIT(a) (1UL << (a)) #define BIT_ULL(a) (1ULL << (a)) #ifndef I40E_MASK /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) #endif #define I40E_MAX_PF 16 #define I40E_MAX_PF_VSI 64 #define I40E_MAX_PF_QP 128 #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 4 #define I40E_MAX_CHAINED_RX_BUFFERS 5 #define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 /* something less than 1 minute */ #define I40E_HEARTBEAT_TIMEOUT (HZ * 50) /* Max default timeout in ms, */ #define I40E_MAX_NVM_TIMEOUT 18000 /* Max timeout in ms for the phy to respond */ #define I40E_MAX_PHY_TIMEOUT 500 /* Check whether address is multicast. */ #define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01)) /* Check whether an address is broadcast. */ #define I40E_IS_BROADCAST(address) \ ((((u8 *)(address))[0] == ((u8)0xff)) && \ (((u8 *)(address))[1] == ((u8)0xff))) /* Switch from ms to the 1usec global time (this is the GTIME resolution) */ #define I40E_MS_TO_GTIME(time) ((time) * 1000) /* forward declaration */ struct i40e_hw; typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); #define ETH_ALEN 6 /* Data type manipulation macros. */ #define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) #define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) #define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) #define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF)) #define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) #define I40E_LO_BYTE(x) ((u8)((x) & 0xFF)) /* Number of Transmit Descriptors must be a multiple of 32. */ #define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 32 /* Number of Receive Descriptors must be a multiple of 32 if * the number of descriptors is greater than 32. */ #define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32 #define I40E_DESC_UNUSED(R) \ ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) /* bitfields for Tx queue mapping in QTX_CTL */ #define I40E_QTX_CTL_VF_QUEUE 0x0 #define I40E_QTX_CTL_VM_QUEUE 0x1 #define I40E_QTX_CTL_PF_QUEUE 0x2 /* debug masks - set these bits in hw->debug_mask to control output */ enum i40e_debug_mask { I40E_DEBUG_INIT = 0x00000001, I40E_DEBUG_RELEASE = 0x00000002, I40E_DEBUG_LINK = 0x00000010, I40E_DEBUG_PHY = 0x00000020, I40E_DEBUG_HMC = 0x00000040, I40E_DEBUG_NVM = 0x00000080, I40E_DEBUG_LAN = 0x00000100, I40E_DEBUG_FLOW = 0x00000200, I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, I40E_DEBUG_FD = 0x00001000, I40E_DEBUG_IWARP = 0x00F00000, I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, I40E_DEBUG_AQ_COMMAND = 0x06000000, I40E_DEBUG_AQ = 0x0F000000, I40E_DEBUG_USER = 0xF0000000, I40E_DEBUG_ALL = 0xFFFFFFFF }; /* PCI Bus Info */ #define I40E_PCI_LINK_STATUS 0xB2 #define I40E_PCI_LINK_WIDTH 0x3F0 #define I40E_PCI_LINK_WIDTH_1 0x10 #define I40E_PCI_LINK_WIDTH_2 0x20 #define I40E_PCI_LINK_WIDTH_4 0x40 #define I40E_PCI_LINK_WIDTH_8 0x80 #define I40E_PCI_LINK_SPEED 0xF #define I40E_PCI_LINK_SPEED_2500 0x1 #define I40E_PCI_LINK_SPEED_5000 0x2 #define I40E_PCI_LINK_SPEED_8000 0x3 #define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \ I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_PHY_COM_REG_PAGE 0x1E #define I40E_PHY_LED_LINK_MODE_MASK 0xF0 #define I40E_PHY_LED_MANUAL_ON 0x100 #define I40E_PHY_LED_PROV_REG_1 0xC430 #define I40E_PHY_LED_MODE_MASK 0xFFFF #define I40E_PHY_LED_MODE_ORIG 0x80000000 /* Memory types */ enum i40e_memset_type { I40E_NONDMA_MEM = 0, I40E_DMA_MEM }; /* Memcpy types */ enum i40e_memcpy_type { I40E_NONDMA_TO_NONDMA = 0, I40E_NONDMA_TO_DMA, I40E_DMA_TO_DMA, I40E_DMA_TO_NONDMA }; /* These are structs for managing the hardware information and the operations. * The structures of function pointers are filled out at init time when we * know for sure exactly which hardware we're working with. This gives us the * flexibility of using the same main driver code but adapting to slightly * different hardware needs as new parts are developed. For this architecture, * the Firmware and AdminQ are intended to insulate the driver from most of the * future changes, but these structures will also do part of the job. 
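 * As an illustrative sketch (not part of the original comment), shared
 * code typically keys off the MAC type recorded in this structure, e.g.
 *
 *	if (hw->mac.type == I40E_MAC_X722)
 *		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
 *
 * so a single driver binary can adapt to the controller family it finds.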
*/ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_X722, I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; enum i40e_media_type { I40E_MEDIA_TYPE_UNKNOWN = 0, I40E_MEDIA_TYPE_FIBER, I40E_MEDIA_TYPE_BASET, I40E_MEDIA_TYPE_BACKPLANE, I40E_MEDIA_TYPE_CX4, I40E_MEDIA_TYPE_DA, I40E_MEDIA_TYPE_VIRTUAL }; enum i40e_fc_mode { I40E_FC_NONE = 0, I40E_FC_RX_PAUSE, I40E_FC_TX_PAUSE, I40E_FC_FULL, I40E_FC_PFC, I40E_FC_DEFAULT }; enum i40e_set_fc_aq_failures { I40E_SET_FC_AQ_FAIL_NONE = 0, I40E_SET_FC_AQ_FAIL_GET = 1, I40E_SET_FC_AQ_FAIL_SET = 2, I40E_SET_FC_AQ_FAIL_UPDATE = 4, I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 }; enum i40e_vsi_type { I40E_VSI_MAIN = 0, I40E_VSI_VMDQ1 = 1, I40E_VSI_VMDQ2 = 2, I40E_VSI_CTRL = 3, I40E_VSI_FCOE = 4, I40E_VSI_MIRROR = 5, I40E_VSI_SRIOV = 6, I40E_VSI_FDIR = 7, I40E_VSI_IWARP = 8, I40E_VSI_TYPE_UNKNOWN }; enum i40e_queue_type { I40E_QUEUE_TYPE_RX = 0, I40E_QUEUE_TYPE_TX, I40E_QUEUE_TYPE_PE_CEQ, I40E_QUEUE_TYPE_UNKNOWN }; struct i40e_link_status { enum i40e_aq_phy_type phy_type; enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; u8 req_fec_info; u8 fec_info; u8 ext_info; u8 loopback; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; u8 requested_speeds; u8 module_type[3]; /* 1st byte: module identifier */ #define I40E_MODULE_TYPE_SFP 0x03 #define I40E_MODULE_TYPE_QSFP 0x0D /* 2nd byte: ethernet compliance codes for 10/40G */ #define I40E_MODULE_TYPE_40G_ACTIVE 0x01 #define I40E_MODULE_TYPE_40G_LR4 0x02 #define I40E_MODULE_TYPE_40G_SR4 0x04 #define I40E_MODULE_TYPE_40G_CR4 0x08 #define I40E_MODULE_TYPE_10G_BASE_SR 0x10 #define I40E_MODULE_TYPE_10G_BASE_LR 0x20 #define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 #define I40E_MODULE_TYPE_10G_BASE_ER 0x80 /* 3rd byte: ethernet compliance codes for 1G */ #define I40E_MODULE_TYPE_1000BASE_SX 0x01 #define I40E_MODULE_TYPE_1000BASE_LX 0x02 #define I40E_MODULE_TYPE_1000BASE_CX 0x04 #define I40E_MODULE_TYPE_1000BASE_T 0x08 }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; bool get_link_info; enum i40e_media_type media_type; /* all the phy types the NVM is capable of */ u64 phy_types; }; #define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) #define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) #define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) #define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) #define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) #define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) #define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) #define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) #define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) #define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) #define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) #define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) #define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) #define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) #define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) #define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) #define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) #define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) #define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) #define 
I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) #define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) #define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) #define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) #define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) #define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) #define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) #define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) #define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) /* * The macro I40E_PHY_TYPE_OFFSET implements a bit shift for some * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, * a shift is needed to adjust for this with values larger than 31. The * only affected values are I40E_PHY_TYPE_25GBASE_*. */ #define I40E_PHY_TYPE_OFFSET 1 #define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \ I40E_PHY_TYPE_OFFSET) +/* Offset for 2.5G/5G PHY Types value to bit number conversion */ +#define I40E_PHY_TYPE_OFFSET2 (-10) +#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \ + I40E_PHY_TYPE_OFFSET2) +#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \ + I40E_PHY_TYPE_OFFSET2) #define I40E_HW_CAP_MAX_GPIO 30 #define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0 #define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1 enum i40e_acpi_programming_method { I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0, I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1 }; #define I40E_WOL_SUPPORT_MASK 0x1 #define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2 #define I40E_PROXY_SUPPORT_MASK 0x4 /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { u32 switch_mode; #define I40E_NVM_IMAGE_TYPE_EVB 0x0 #define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 #define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 /* Cloud filter modes: * Mode1: Filter on L4 port only * Mode2: Filter for non-tunneled traffic * Mode3: Filter for tunnel traffic */ #define I40E_CLOUD_FILTER_MODE1 0x6 #define I40E_CLOUD_FILTER_MODE2 0x7 #define I40E_CLOUD_FILTER_MODE3 0x8 #define I40E_SWITCH_MODE_MASK 0xF u32 management_mode; u32 mng_protocols_over_mctp; #define I40E_MNG_PROTOCOL_PLDM 0x2 #define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 #define I40E_MNG_PROTOCOL_NCSI 0x8 u32 npar_enable; u32 os2bmc; u32 valid_functions; bool sr_iov_1_1; bool vmdq; bool evb_802_1_qbg; /* Edge Virtual Bridging */ bool evb_802_1_qbh; /* Bridge Port Extension */ bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ bool flex10_enable; bool flex10_capable; u32 flex10_mode; #define I40E_FLEX10_MODE_UNKNOWN 0x0 #define I40E_FLEX10_MODE_DCC 0x1 #define I40E_FLEX10_MODE_DCI 0x2 u32 flex10_status; #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 bool sec_rev_disabled; bool update_disabled; #define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 #define
I40E_NVM_MGMT_UPDATE_DISABLED 0x2 bool mgmt_cem; bool ieee_1588; bool iwarp; bool fd; u32 fd_filters_guaranteed; u32 fd_filters_best_effort; bool rss; u32 rss_table_size; u32 rss_table_entry_width; bool led[I40E_HW_CAP_MAX_GPIO]; bool sdp[I40E_HW_CAP_MAX_GPIO]; u32 nvm_image_type; u32 num_flow_director_filters; u32 num_vfs; u32 vf_base_id; u32 num_vsis; u32 num_rx_qp; u32 num_tx_qp; u32 base_queue; u32 num_msix_vectors; u32 num_msix_vectors_vf; u32 led_pin_num; u32 sdp_pin_num; u32 mdio_port_num; u32 mdio_port_mode; u8 rx_buf_chain_len; u32 enabled_tcmap; u32 maxtc; u64 wr_csr_prot; bool apm_wol_support; enum i40e_acpi_programming_method acpi_prog_method; bool proxy_support; }; struct i40e_mac_info { enum i40e_mac_type type; u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; u8 san_addr[ETH_ALEN]; u8 port_addr[ETH_ALEN]; u16 max_fcoeq; }; enum i40e_aq_resources_ids { I40E_NVM_RESOURCE_ID = 1 }; enum i40e_aq_resource_access_type { I40E_RESOURCE_READ = 1, I40E_RESOURCE_WRITE }; struct i40e_nvm_info { u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ enum i40e_nvmupd_cmd { I40E_NVMUPD_INVALID, I40E_NVMUPD_READ_CON, I40E_NVMUPD_READ_SNT, I40E_NVMUPD_READ_LCB, I40E_NVMUPD_READ_SA, I40E_NVMUPD_WRITE_ERA, I40E_NVMUPD_WRITE_CON, I40E_NVMUPD_WRITE_SNT, I40E_NVMUPD_WRITE_LCB, I40E_NVMUPD_WRITE_SA, I40E_NVMUPD_CSUM_CON, I40E_NVMUPD_CSUM_SA, I40E_NVMUPD_CSUM_LCB, I40E_NVMUPD_STATUS, I40E_NVMUPD_EXEC_AQ, I40E_NVMUPD_GET_AQ_RESULT, I40E_NVMUPD_GET_AQ_EVENT, I40E_NVMUPD_FEATURES, }; enum i40e_nvmupd_state { I40E_NVMUPD_STATE_INIT, I40E_NVMUPD_STATE_READING, I40E_NVMUPD_STATE_WRITING, I40E_NVMUPD_STATE_INIT_WAIT, I40E_NVMUPD_STATE_WRITE_WAIT, I40E_NVMUPD_STATE_ERROR }; /* nvm_access definition and its masks/shifts need to be accessible to * application, core driver, and shared code. Where is the right file? 
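 * As an illustrative sketch (assumed usage, not taken from the original
 * sources), an application requesting a read-in-one-transaction (SA) of
 * the Shadow RAM could encode it as
 *
 *	cmd->command = I40E_NVM_READ;
 *	cmd->config = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) | module_pointer;
 *
 * together with cmd->offset and cmd->data_size; module_pointer is a
 * hypothetical caller-supplied value covered by I40E_NVM_MOD_PNT_MASK,
 * and the transaction is recovered with I40E_NVM_TRANS_MASK.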
*/ #define I40E_NVM_READ 0xB #define I40E_NVM_WRITE 0xC #define I40E_NVM_MOD_PNT_MASK 0xFF #define I40E_NVM_TRANS_SHIFT 8 #define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) #define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 #define I40E_NVM_PRESERVATION_FLAGS_MASK \ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) #define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 #define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 #define I40E_NVM_CON 0x0 #define I40E_NVM_SNT 0x1 #define I40E_NVM_LCB 0x2 #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) #define I40E_NVM_ERA 0x4 #define I40E_NVM_CSUM 0x8 #define I40E_NVM_AQE 0xe #define I40E_NVM_EXEC 0xf #define I40E_NVM_EXEC_GET_AQ_RESULT 0x0 #define I40E_NVM_EXEC_FEATURES 0xe #define I40E_NVM_EXEC_STATUS 0xf #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT) #define I40E_NVMUPD_MAX_DATA 4096 #define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ struct i40e_nvm_access { u32 command; u32 config; u32 offset; /* in bytes */ u32 data_size; /* in bytes */ u8 data[1]; }; /* NVMUpdate features API */ #define I40E_NVMUPD_FEATURES_API_VER_MAJOR 0 #define I40E_NVMUPD_FEATURES_API_VER_MINOR 14 #define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12 #define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0) struct i40e_nvmupd_features { u8 major; u8 minor; u16 size; u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN]; }; /* (Q)SFP module access definitions */ #define I40E_I2C_EEPROM_DEV_ADDR 0xA0 #define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 #define I40E_MODULE_TYPE_ADDR 0x00 #define I40E_MODULE_REVISION_ADDR 0x01 #define I40E_MODULE_SFF_8472_COMP 0x5E #define I40E_MODULE_SFF_8472_SWAP 0x5C #define I40E_MODULE_SFF_ADDR_MODE 0x04 #define I40E_MODULE_SFF_DIAG_CAPAB 0x40 #define I40E_MODULE_TYPE_QSFP_PLUS 0x0D #define I40E_MODULE_TYPE_QSFP28 0x11 #define I40E_MODULE_QSFP_MAX_LEN 640 /* PCI bus types */ enum i40e_bus_type { i40e_bus_type_unknown = 0, i40e_bus_type_pci, i40e_bus_type_pcix, i40e_bus_type_pci_express, i40e_bus_type_reserved }; /* PCI bus speeds */ enum i40e_bus_speed { i40e_bus_speed_unknown = 0, i40e_bus_speed_33 = 33, i40e_bus_speed_66 = 66, i40e_bus_speed_100 = 100, i40e_bus_speed_120 = 120, i40e_bus_speed_133 = 133, i40e_bus_speed_2500 = 2500, i40e_bus_speed_5000 = 5000, i40e_bus_speed_8000 = 8000, i40e_bus_speed_reserved }; /* PCI bus widths */ enum i40e_bus_width { i40e_bus_width_unknown = 0, i40e_bus_width_pcie_x1 = 1, i40e_bus_width_pcie_x2 = 2, i40e_bus_width_pcie_x4 = 4, i40e_bus_width_pcie_x8 = 8, i40e_bus_width_32 = 32, i40e_bus_width_64 = 64, i40e_bus_width_reserved }; /* Bus parameters */ struct i40e_bus_info { enum i40e_bus_speed speed; enum i40e_bus_width width; enum i40e_bus_type type; u16 func; u16 device; u16 lan_id; u16 bus_id; }; /* Flow control (FC) parameters */ struct i40e_fc_info { enum i40e_fc_mode current_mode; /* FC mode in effect */ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ }; #define I40E_MAX_TRAFFIC_CLASS 8 #define I40E_MAX_USER_PRIORITY 8 #define I40E_DCBX_MAX_APPS 32 #define I40E_LLDPDU_SIZE 1500 #define I40E_TLV_STATUS_OPER 0x1 #define I40E_TLV_STATUS_SYNC 0x2 #define I40E_TLV_STATUS_ERR 0x4 #define I40E_CEE_OPER_MAX_APPS 3 #define I40E_APP_PROTOID_FCOE 0x8906 #define I40E_APP_PROTOID_ISCSI 0x0cbc #define I40E_APP_PROTOID_FIP 0x8914 #define I40E_APP_SEL_ETHTYPE 0x1 #define I40E_APP_SEL_TCPIP 0x2 #define I40E_CEE_APP_SEL_ETHTYPE 0x0 #define I40E_CEE_APP_SEL_TCPIP 0x1 /* CEE or IEEE 802.1Qaz ETS Configuration data */ struct i40e_dcb_ets_config { u8 willing; u8 cbs; u8 maxtcs; u8 
prioritytable[I40E_MAX_TRAFFIC_CLASS]; u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; }; /* CEE or IEEE 802.1Qaz PFC Configuration data */ struct i40e_dcb_pfc_config { u8 willing; u8 mbc; u8 pfccap; u8 pfcenable; }; /* CEE or IEEE 802.1Qaz Application Priority data */ struct i40e_dcb_app_priority_table { u8 priority; u8 selector; u16 protocolid; }; struct i40e_dcbx_config { u8 dcbx_mode; #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 u8 app_mode; #define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; struct i40e_dcb_ets_config etsrec; struct i40e_dcb_pfc_config pfc; struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; }; /* Port hardware description */ struct i40e_hw { u8 *hw_addr; void *back; /* subsystem structs */ struct i40e_phy_info phy; struct i40e_mac_info mac; struct i40e_bus_info bus; struct i40e_nvm_info nvm; struct i40e_fc_info fc; /* pci info */ u16 device_id; u16 vendor_id; u16 subsystem_device_id; u16 subsystem_vendor_id; u8 revision_id; u8 port; bool adapter_stopped; /* capabilities for entire device and PCI func */ struct i40e_hw_capabilities dev_caps; struct i40e_hw_capabilities func_caps; /* Flow Director shared filter space */ u16 fdir_shared_filter_count; /* device profile info */ u8 pf_id; u16 main_vsi_seid; /* for multi-function MACs */ u16 partition_id; u16 num_partitions; u16 num_ports; /* Closest numa node to the device */ u16 numa_node; /* Admin Queue info */ struct i40e_adminq_info aq; /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_aq_desc nvm_aq_event_desc; struct i40e_virt_mem nvm_buff; bool nvm_release_on_done; u16 nvm_wait_opcode; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ /* LLDP/DCBX Status */ u16 dcbx_status; /* DCBX info */ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ /* WoL and proxy support */ u16 num_wol_proxy_filters; u16 wol_proxy_vsi_seid; #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) #define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) #define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) #define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) #define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6) #define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) #define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8) u64 flags; /* Used in set switch config AQ command */ u16 switch_tag; u16 first_tag; u16 second_tag; /* NVMUpdate features */ struct i40e_nvmupd_features nvmupd_features; /* debug mask */ u32 debug_mask; char err_str[16]; }; static INLINE bool i40e_is_vf(struct i40e_hw *hw) { return (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF); } struct i40e_driver_version { u8 major_version; u8 minor_version; u8 build_version; u8 subbuild_version; u8 driver_string[32]; }; /* RX Descriptors */ union i40e_16byte_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { struct { union { __le16 mirroring_status; __le16 fcoe_ctx_id; } mirr_fcoe; __le16 l2tag1; } lo_dword; union { __le32 rss; /* RSS Hash */ __le32 fd_id; /* Flow director filter id */ __le32 fcoe_param; /* FCoE DDP Context id */ } hi_dword; } qword0; struct { /* 
ext status/error/pktype/length */ __le64 status_error_len; } qword1; } wb; /* writeback */ }; union i40e_32byte_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ /* bit 0 of hdr_buffer_addr is DD bit */ __le64 rsvd1; __le64 rsvd2; } read; struct { struct { struct { union { __le16 mirroring_status; __le16 fcoe_ctx_id; } mirr_fcoe; __le16 l2tag1; } lo_dword; union { __le32 rss; /* RSS Hash */ __le32 fcoe_param; /* FCoE DDP Context id */ /* Flow director filter id in case of * Programming status desc WB */ __le32 fd_id; } hi_dword; } qword0; struct { /* status/error/pktype/length */ __le64 status_error_len; } qword1; struct { __le16 ext_status; /* extended status */ __le16 rsvd; __le16 l2tag2_1; __le16 l2tag2_2; } qword2; struct { union { __le32 flex_bytes_lo; __le32 pe_status; } lo_dword; union { __le32 flex_bytes_hi; __le32 fd_id; } hi_dword; } qword3; } wb; /* writeback */ }; #define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8 #define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \ I40E_RXD_QW0_MIRROR_STATUS_SHIFT) #define I40E_RXD_QW0_FCOEINDX_SHIFT 0 #define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \ I40E_RXD_QW0_FCOEINDX_SHIFT) enum i40e_rx_desc_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_STATUS_DD_SHIFT = 0, I40E_RX_DESC_STATUS_EOF_SHIFT = 1, I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; #define I40E_RXD_QW1_STATUS_SHIFT 0 #define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \ I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT #define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) #define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST_SHIFT #define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \ I40E_RXD_QW1_STATUS_UMBCAST_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc?
FD_ID : RSV */ I40E_RX_DESC_FLTSTAT_RSV = 2, I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, }; #define I40E_RXD_PACKET_TYPE_UNICAST 0 #define I40E_RXD_PACKET_TYPE_MULTICAST 1 #define I40E_RXD_PACKET_TYPE_BROADCAST 2 #define I40E_RXD_PACKET_TYPE_MIRRORED 3 #define I40E_RXD_QW1_ERROR_SHIFT 19 #define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) enum i40e_rx_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_ERROR_RXE_SHIFT = 0, I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, I40E_RX_DESC_ERROR_HBO_SHIFT = 2, I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ I40E_RX_DESC_ERROR_IPE_SHIFT = 3, I40E_RX_DESC_ERROR_L4E_SHIFT = 4, I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 }; enum i40e_rx_desc_error_l3l4e_fcoe_masks { I40E_RX_DESC_ERROR_L3L4E_NONE = 0, I40E_RX_DESC_ERROR_L3L4E_PROT = 1, I40E_RX_DESC_ERROR_L3L4E_FC = 2, I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 }; #define I40E_RXD_QW1_PTYPE_SHIFT 30 #define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) /* Packet type non-ip values */ enum i40e_rx_l2_ptype { I40E_RX_PTYPE_L2_RESERVED = 0, I40E_RX_PTYPE_L2_MAC_PAY2 = 1, I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, I40E_RX_PTYPE_L2_FIP_PAY2 = 3, I40E_RX_PTYPE_L2_OUI_PAY2 = 4, I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, I40E_RX_PTYPE_L2_ECP_PAY2 = 7, I40E_RX_PTYPE_L2_EVB_PAY2 = 8, I40E_RX_PTYPE_L2_QCN_PAY2 = 9, I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, I40E_RX_PTYPE_L2_ARP = 11, I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 }; struct i40e_rx_ptype_decoded { u32 ptype:8; u32 known:1; u32 outer_ip:1; u32 outer_ip_ver:1; u32 outer_frag:1; u32 tunnel_type:3; u32 tunnel_end_prot:2; u32 tunnel_end_frag:1; u32 inner_prot:4; u32 payload_layer:3; }; enum i40e_rx_ptype_outer_ip { I40E_RX_PTYPE_OUTER_L2 = 0, I40E_RX_PTYPE_OUTER_IP = 1 }; enum i40e_rx_ptype_outer_ip_ver { I40E_RX_PTYPE_OUTER_NONE = 0, I40E_RX_PTYPE_OUTER_IPV4 = 0, I40E_RX_PTYPE_OUTER_IPV6 = 1 }; enum i40e_rx_ptype_outer_fragmented { I40E_RX_PTYPE_NOT_FRAG = 0, I40E_RX_PTYPE_FRAG = 1 }; enum i40e_rx_ptype_tunnel_type { I40E_RX_PTYPE_TUNNEL_NONE = 0, I40E_RX_PTYPE_TUNNEL_IP_IP = 1, I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, }; enum i40e_rx_ptype_tunnel_end_prot { I40E_RX_PTYPE_TUNNEL_END_NONE = 0, I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, }; enum i40e_rx_ptype_inner_prot { I40E_RX_PTYPE_INNER_PROT_NONE = 0, I40E_RX_PTYPE_INNER_PROT_UDP = 1, I40E_RX_PTYPE_INNER_PROT_TCP = 2, I40E_RX_PTYPE_INNER_PROT_SCTP = 3, I40E_RX_PTYPE_INNER_PROT_ICMP = 4, I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 }; enum i40e_rx_ptype_payload_layer { I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, }; #define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF #define I40E_RX_PTYPE_SHIFT 56 #define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define 
I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 #define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) #define I40E_RXD_QW1_NEXTP_SHIFT 38 #define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT) #define I40E_RXD_QW2_EXT_STATUS_SHIFT 0 #define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \ I40E_RXD_QW2_EXT_STATUS_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, }; #define I40E_RXD_QW2_L2TAG2_SHIFT 0 #define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT) #define I40E_RXD_QW2_L2TAG3_SHIFT 16 #define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT) enum i40e_rx_desc_pe_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 }; #define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 #define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) #define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0 #define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \ I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT) #define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 #define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) enum i40e_rx_prog_status_desc_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ }; enum i40e_rx_prog_status_desc_prog_id_masks { I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, }; enum i40e_rx_prog_status_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 }; #define I40E_TWO_BIT_MASK 0x3 #define I40E_THREE_BIT_MASK 0x7 #define I40E_FOUR_BIT_MASK 0xF #define I40E_EIGHTEEN_BIT_MASK 0x3FFFF /* TX Descriptor */ struct i40e_tx_desc { __le64 buffer_addr; /* Address of descriptor's data buf */ __le64 cmd_type_offset_bsz; }; #define I40E_TXD_QW1_DTYPE_SHIFT 0 #define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) enum i40e_tx_desc_dtype_value { I40E_TX_DESC_DTYPE_DATA = 0x0, I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ I40E_TX_DESC_DTYPE_CONTEXT = 0x1, I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, 
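	/* note: no descriptor type is defined here for the 0xA encoding */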
I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, I40E_TX_DESC_DTYPE_DESC_DONE = 0xF }; #define I40E_TXD_QW1_CMD_SHIFT 4 #define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) enum i40e_tx_desc_cmd_bits { I40E_TX_DESC_CMD_EOP = 0x0001, I40E_TX_DESC_CMD_RS = 0x0002, I40E_TX_DESC_CMD_ICRC = 0x0004, I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, I40E_TX_DESC_CMD_DUMMY = 0x0010, I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ I40E_TX_DESC_CMD_FCOET = 0x0080, I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ }; #define I40E_TXD_QW1_OFFSET_SHIFT 16 #define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ I40E_TXD_QW1_OFFSET_SHIFT) enum i40e_tx_desc_length_fields { /* Note: These are predefined bit offsets */ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ }; #define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) #define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) #define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) #define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) #define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 #define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ I40E_TXD_QW1_TX_BUF_SZ_SHIFT) #define I40E_TXD_QW1_L2TAG1_SHIFT 48 #define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) /* Context descriptors */ struct i40e_tx_context_desc { __le32 tunneling_params; __le16 l2tag2; __le16 rsvd; __le64 type_cmd_tso_mss; }; #define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 #define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) #define I40E_TXD_CTX_QW1_CMD_SHIFT 4 #define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) enum i40e_tx_ctx_desc_cmd_bits { I40E_TX_CTX_DESC_TSO = 0x01, I40E_TX_CTX_DESC_TSYN = 0x02, I40E_TX_CTX_DESC_IL2TAG2 = 0x04, I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, I40E_TX_CTX_DESC_SWPE = 0x40 }; #define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 #define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) #define I40E_TXD_CTX_QW1_MSS_SHIFT 50 #define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ I40E_TXD_CTX_QW1_MSS_SHIFT) #define I40E_TXD_CTX_QW1_VSI_SHIFT 50 #define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) #define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 #define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ I40E_TXD_CTX_QW0_EXT_IP_SHIFT) enum i40e_tx_ctx_desc_eipt_offload { I40E_TX_CTX_EXT_IP_NONE = 0x0, I40E_TX_CTX_EXT_IP_IPV6 = 0x1, I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, I40E_TX_CTX_EXT_IP_IPV4 = 0x3 }; #define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 #define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define 
I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 #define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK #define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 #define I40E_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \ I40E_TXD_CTX_QW0_NATLEN_SHIFT) #define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 #define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ I40E_TXD_CTX_QW0_DECTTL_SHIFT) #define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 #define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) struct i40e_nop_desc { __le64 rsvd; __le64 dtype_cmd; }; #define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0 #define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT) #define I40E_TXD_NOP_QW1_CMD_SHIFT 4 #define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT) enum i40e_tx_nop_desc_cmd_bits { /* Note: These are predefined bit offsets */ I40E_TX_NOP_DESC_EOP_SHIFT = 0, I40E_TX_NOP_DESC_RS_SHIFT = 1, I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */ }; struct i40e_filter_program_desc { __le32 qindex_flex_ptype_vsi; __le32 rsvd; __le32 dtype_cmd_cntindex; __le32 fd_id; }; #define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 #define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ I40E_TXD_FLTR_QW0_QINDEX_SHIFT) #define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 #define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) #define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 #define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) /* Packet Classifier Types for filters */ enum i40e_filter_pctype { /* Note: Values 0-28 are reserved for future use. * Values 29, 30, and 32 are not supported on XL710 and X710. */ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, /* Note: Values 37-38 are reserved for future use. * Values 39, 40, and 42 are not supported on XL710 and X710.
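 * (These PCTYPEs are implemented on the X722 family.)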
*/ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, /* Note: Value 47 is reserved for future use */ I40E_FILTER_PCTYPE_FCOE_OX = 48, I40E_FILTER_PCTYPE_FCOE_RX = 49, I40E_FILTER_PCTYPE_FCOE_OTHER = 50, /* Note: Values 51-62 are reserved for future use */ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, }; enum i40e_filter_program_desc_dest { I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, }; enum i40e_filter_program_desc_fd_status { I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 #define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0 #define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) enum i40e_filter_program_desc_pcmd { I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, }; #define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) #define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) enum i40e_filter_type { I40E_FLOW_DIRECTOR_FLTR = 0, I40E_PE_QUAD_HASH_FLTR = 1, I40E_ETHERTYPE_FLTR, I40E_FCOE_CTX_FLTR, I40E_MAC_VLAN_FLTR, I40E_HASH_FLTR }; struct i40e_vsi_context { u16 seid; u16 uplink_seid; u16 vsi_number; u16 vsis_allocated; u16 vsis_unallocated; u16 flags; u8 pf_num; u8 vf_num; u8 connection_type; struct i40e_aqc_vsi_properties_data info; }; struct i40e_veb_context { u16 seid; u16 uplink_seid; u16 veb_number; u16 vebs_allocated; u16 vebs_unallocated; u16 flags; struct i40e_aqc_get_veb_parameters_completion info; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ struct i40e_eth_stats { u64 rx_bytes; /* gorc */ u64 rx_unicast; /* uprc */ u64 rx_multicast; /* mprc */ u64 rx_broadcast; /* bprc */ u64 rx_discards; /* rdpc */ u64 rx_unknown_protocol; /* rupp */ u64 tx_bytes; /* gotc */ u64 tx_unicast; /* uptc */ u64 tx_multicast; /* mptc */ u64 tx_broadcast; /* bptc */ u64 tx_discards; /* tdpc */ u64 tx_errors; /* tepc */ }; /* Statistics collected per VEB per TC */ struct i40e_veb_tc_stats { u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; u64 
tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; }; /* Statistics collected by the MAC */ struct i40e_hw_port_stats { /* eth stats collected by the port */ struct i40e_eth_stats eth; /* additional port specific stats */ u64 tx_dropped_link_down; /* tdold */ u64 crc_errors; /* crcerrs */ u64 illegal_bytes; /* illerrc */ u64 error_bytes; /* errbc */ u64 mac_local_faults; /* mlfc */ u64 mac_remote_faults; /* mrfc */ u64 rx_length_errors; /* rlec */ u64 link_xon_rx; /* lxonrxc */ u64 link_xoff_rx; /* lxoffrxc */ u64 priority_xon_rx[8]; /* pxonrxc[8] */ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ u64 link_xon_tx; /* lxontxc */ u64 link_xoff_tx; /* lxofftxc */ u64 priority_xon_tx[8]; /* pxontxc[8] */ u64 priority_xoff_tx[8]; /* pxofftxc[8] */ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ u64 rx_size_64; /* prc64 */ u64 rx_size_127; /* prc127 */ u64 rx_size_255; /* prc255 */ u64 rx_size_511; /* prc511 */ u64 rx_size_1023; /* prc1023 */ u64 rx_size_1522; /* prc1522 */ u64 rx_size_big; /* prc9522 */ u64 rx_undersize; /* ruc */ u64 rx_fragments; /* rfc */ u64 rx_oversize; /* roc */ u64 rx_jabber; /* rjc */ u64 tx_size_64; /* ptc64 */ u64 tx_size_127; /* ptc127 */ u64 tx_size_255; /* ptc255 */ u64 tx_size_511; /* ptc511 */ u64 tx_size_1023; /* ptc1023 */ u64 tx_size_1522; /* ptc1522 */ u64 tx_size_big; /* ptc9522 */ u64 mac_short_packet_dropped; /* mspdc */ u64 checksum_error; /* xec */ /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; u32 fd_atr_status; u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; u64 tx_lpi_count; /* etlpic */ u64 rx_lpi_count; /* erlpic */ + u64 tx_lpi_duration; + u64 rx_lpi_duration; }; /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03 #define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04 #define I40E_SR_OPTION_ROM_PTR 0x05 #define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06 #define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07 #define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08 #define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09 #define I40E_SR_RO_PCIE_LCB_PTR 0x0A #define I40E_SR_EMP_IMAGE_PTR 0x0B #define I40E_SR_PE_IMAGE_PTR 0x0C #define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D #define I40E_SR_MNG_CONFIG_PTR 0x0E #define I40E_EMP_MODULE_PTR 0x0F #define I40E_SR_EMP_MODULE_PTR 0x48 #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 #define I40E_SR_BOOT_CONFIG_PTR 0x17 #define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28 #define I40E_SR_NVM_MAP_VERSION 0x29 #define I40E_SR_NVM_IMAGE_VERSION 0x2A #define I40E_SR_NVM_STRUCTURE_VERSION 0x2B #define I40E_SR_NVM_EETRACK_LO 0x2D #define I40E_SR_NVM_EETRACK_HI 0x2E #define I40E_SR_VPD_PTR 0x2F #define I40E_SR_PXE_SETUP_PTR 0x30 #define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31 #define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34 #define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35 #define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37 #define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38 #define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A #define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B #define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C #define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E #define I40E_SR_SW_CHECKSUM_WORD 0x3F #define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40 #define 
I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42 #define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44 #define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46 #define I40E_SR_EMP_SR_SETTINGS_PTR 0x48 #define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49 #define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D #define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E +#define I40E_SR_5TH_FREE_PROVISION_AREA_PTR 0x50 /* Auxiliary field, mask, and shift definitions for Shadow RAM and NVM Flash */ #define I40E_SR_VPD_MODULE_MAX_SIZE 1024 #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) #define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) #define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) #define I40E_PTR_TYPE BIT(15) #define I40E_SR_OCP_CFG_WORD0 0x2B #define I40E_SR_OCP_ENABLED BIT(15) /* Shadow RAM related */ #define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 #define I40E_SR_BUF_ALIGNMENT 4096 #define I40E_SR_WORDS_IN_1KB 512 /* Checksum should be calculated such that after adding all the words, * including the checksum word itself, the sum should be 0xBABA. */ #define I40E_SR_SW_CHECKSUM_BASE 0xBABA #define I40E_SRRD_SRCTL_ATTEMPTS 100000 enum i40e_switch_element_types { I40E_SWITCH_ELEMENT_TYPE_MAC = 1, I40E_SWITCH_ELEMENT_TYPE_PF = 2, I40E_SWITCH_ELEMENT_TYPE_VF = 3, I40E_SWITCH_ELEMENT_TYPE_EMP = 4, I40E_SWITCH_ELEMENT_TYPE_BMC = 6, I40E_SWITCH_ELEMENT_TYPE_PE = 16, I40E_SWITCH_ELEMENT_TYPE_VEB = 17, I40E_SWITCH_ELEMENT_TYPE_PA = 18, I40E_SWITCH_ELEMENT_TYPE_VSI = 19, }; /* Supported EtherType filters */ enum i40e_ether_type_index { I40E_ETHER_TYPE_1588 = 0, I40E_ETHER_TYPE_FIP = 1, I40E_ETHER_TYPE_OUI_EXTENDED = 2, I40E_ETHER_TYPE_MAC_CONTROL = 3, I40E_ETHER_TYPE_LLDP = 4, I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, I40E_ETHER_TYPE_QCN_CNM = 7, I40E_ETHER_TYPE_8021X = 8, I40E_ETHER_TYPE_ARP = 9, I40E_ETHER_TYPE_RSV1 = 10, I40E_ETHER_TYPE_RSV2 = 11, }; /* Filter context base size is 1K */ #define I40E_HASH_FILTER_BASE_SIZE 1024 /* Supported Hash filter values */ enum i40e_hash_filter_size { I40E_HASH_FILTER_SIZE_1K = 0, I40E_HASH_FILTER_SIZE_2K = 1, I40E_HASH_FILTER_SIZE_4K = 2, I40E_HASH_FILTER_SIZE_8K = 3, I40E_HASH_FILTER_SIZE_16K = 4, I40E_HASH_FILTER_SIZE_32K = 5, I40E_HASH_FILTER_SIZE_64K = 6, I40E_HASH_FILTER_SIZE_128K = 7, I40E_HASH_FILTER_SIZE_256K = 8, I40E_HASH_FILTER_SIZE_512K = 9, I40E_HASH_FILTER_SIZE_1M = 10, }; /* DMA context base size is 0.5K */ #define I40E_DMA_CNTX_BASE_SIZE 512 /* Supported DMA context values */ enum i40e_dma_cntx_size { I40E_DMA_CNTX_SIZE_512 = 0, I40E_DMA_CNTX_SIZE_1K = 1, I40E_DMA_CNTX_SIZE_2K = 2, I40E_DMA_CNTX_SIZE_4K = 3, I40E_DMA_CNTX_SIZE_8K = 4, I40E_DMA_CNTX_SIZE_16K = 5, I40E_DMA_CNTX_SIZE_32K = 6, I40E_DMA_CNTX_SIZE_64K = 7, I40E_DMA_CNTX_SIZE_128K = 8, I40E_DMA_CNTX_SIZE_256K = 9, }; /* Supported Hash look up table (LUT) sizes */ enum i40e_hash_lut_size { I40E_HASH_LUT_SIZE_128 = 0, I40E_HASH_LUT_SIZE_512 = 1, }; /* Structure to hold per-PF filter control settings */ struct i40e_filter_control_settings { /* number of PE Quad Hash filter buckets */ enum i40e_hash_filter_size pe_filt_num; /* number of PE Quad Hash contexts */ enum i40e_dma_cntx_size pe_cntx_num; /* number of FCoE filter buckets */ enum i40e_hash_filter_size fcoe_filt_num; /* number of FCoE DDP contexts */ enum i40e_dma_cntx_size fcoe_cntx_num; /* size of the Hash LUT */ enum i40e_hash_lut_size hash_lut_size; /* enable FDIR filters for PF and its VFs */ bool enable_fdir; /* enable Ethertype filters
for PF and its VFs */ bool enable_ethtype; /* enable MAC/VLAN filters for PF and its VFs */ bool enable_macvlan; }; /* Structure to hold device level control filter counts */ struct i40e_control_filter_stats { u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ u16 etype_used; /* Used perfect EtherType filters */ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ u16 etype_free; /* Un-used perfect EtherType filters */ }; enum i40e_reset_type { I40E_RESET_POR = 0, I40E_RESET_CORER = 1, I40E_RESET_GLOBR = 2, I40E_RESET_EMPR = 3, }; /* IEEE 802.1AB LLDP Agent Variables from NVM */ #define I40E_NVM_LLDP_CFG_PTR 0x06 #define I40E_SR_LLDP_CFG_PTR 0x31 struct i40e_lldp_variables { u16 length; u16 adminstatus; u16 msgfasttx; u16 msgtxinterval; u16 txparams; u16 timers; u16 crc8; }; /* Offsets into Alternate Ram */ #define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ #define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ #define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ #define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ #define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ #define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ /* Alternate Ram Bandwidth Masks */ #define I40E_ALT_BW_VALUE_MASK 0xFF #define I40E_ALT_BW_RELATIVE_MASK 0x40000000 #define I40E_ALT_BW_VALID_MASK 0x80000000 /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 /* INPUT SET MASK for RSS, flow director, and flexible payload */ #define I40E_L3_SRC_SHIFT 47 #define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) #define I40E_L3_V6_SRC_SHIFT 43 #define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT) #define I40E_L3_DST_SHIFT 35 #define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT) #define I40E_L3_V6_DST_SHIFT 35 #define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT) #define I40E_L4_SRC_SHIFT 34 #define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT) #define I40E_L4_DST_SHIFT 33 #define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT) #define I40E_VERIFY_TAG_SHIFT 31 #define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT) #define I40E_FLEX_50_SHIFT 13 #define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT) #define I40E_FLEX_51_SHIFT 12 #define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT) #define I40E_FLEX_52_SHIFT 11 #define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT) #define I40E_FLEX_53_SHIFT 10 #define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT) #define I40E_FLEX_54_SHIFT 9 #define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT) #define I40E_FLEX_55_SHIFT 8 #define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT) #define I40E_FLEX_56_SHIFT 7 #define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT) #define I40E_FLEX_57_SHIFT 6 #define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) +#define I40E_BCM_PHY_PCS_STATUS1_PAGE 0x3 +#define I40E_BCM_PHY_PCS_STATUS1_REG 0x0001 +#define I40E_BCM_PHY_PCS_STATUS1_RX_LPI BIT(8) +#define I40E_BCM_PHY_PCS_STATUS1_TX_LPI BIT(9) + #endif /* _I40E_TYPE_H_ */ Index: head/sys/dev/ixl/if_ixl.c =================================================================== --- head/sys/dev/ixl/if_ixl.c (revision 365230) +++ head/sys/dev/ixl/if_ixl.c (revision 365231) @@ -1,1845 +1,1863 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl.h" #include "ixl_pf.h" #ifdef IXL_IW #include "ixl_iw.h" #include "ixl_iw_int.h" #endif #ifdef PCI_IOV #include "ixl_pf_iov.h" #endif /********************************************************************* * Driver version *********************************************************************/ #define IXL_DRIVER_VERSION_MAJOR 2 -#define IXL_DRIVER_VERSION_MINOR 2 +#define IXL_DRIVER_VERSION_MINOR 3 #define IXL_DRIVER_VERSION_BUILD 0 #define IXL_DRIVER_VERSION_STRING \ __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \ __XSTRING(IXL_DRIVER_VERSION_MINOR) "." 
\ __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k" /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * * ( Vendor ID, Device ID, Branding String ) *********************************************************************/ static pci_vendor_info_t ixl_vendor_info_array[] = { PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"), /* required last entry */ PVID_END }; /********************************************************************* * Function prototypes *********************************************************************/ /*** IFLIB interface ***/ static void *ixl_register(device_t dev); static int ixl_if_attach_pre(if_ctx_t ctx); static int ixl_if_attach_post(if_ctx_t ctx); static int ixl_if_detach(if_ctx_t ctx); static int ixl_if_shutdown(if_ctx_t ctx); static int ixl_if_suspend(if_ctx_t ctx); static int ixl_if_resume(if_ctx_t ctx); static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix); static void ixl_if_enable_intr(if_ctx_t ctx); static void ixl_if_disable_intr(if_ctx_t ctx); static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); static void ixl_if_queues_free(if_ctx_t ctx); static void 
ixl_if_update_admin_status(if_ctx_t ctx); static void ixl_if_multi_set(if_ctx_t ctx); static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); static int ixl_if_media_change(if_ctx_t ctx); static int ixl_if_promisc_set(if_ctx_t ctx, int flags); static void ixl_if_timer(if_ctx_t ctx, uint16_t qid); static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag); static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag); static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt); static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req); static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data); static bool ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event); #ifdef PCI_IOV static void ixl_if_vflr_handle(if_ctx_t ctx); #endif /*** Other ***/ static u_int ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int); static void ixl_save_pf_tunables(struct ixl_pf *); static int ixl_allocate_pci_resources(struct ixl_pf *); static void ixl_setup_ssctx(struct ixl_pf *pf); static void ixl_admin_timer(void *arg); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixl_methods[] = { /* Device interface */ DEVMETHOD(device_register, ixl_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), #ifdef PCI_IOV DEVMETHOD(pci_iov_init, iflib_device_iov_init), DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit), DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf), #endif DEVMETHOD_END }; static driver_t ixl_driver = { "ixl", ixl_methods, sizeof(struct ixl_pf), }; devclass_t ixl_devclass; DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array); MODULE_VERSION(ixl, 3); MODULE_DEPEND(ixl, pci, 1, 1, 1); MODULE_DEPEND(ixl, ether, 1, 1, 1); MODULE_DEPEND(ixl, iflib, 1, 1, 1); static device_method_t ixl_if_methods[] = { DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre), DEVMETHOD(ifdi_attach_post, ixl_if_attach_post), DEVMETHOD(ifdi_detach, ixl_if_detach), DEVMETHOD(ifdi_shutdown, ixl_if_shutdown), DEVMETHOD(ifdi_suspend, ixl_if_suspend), DEVMETHOD(ifdi_resume, ixl_if_resume), DEVMETHOD(ifdi_init, ixl_if_init), DEVMETHOD(ifdi_stop, ixl_if_stop), DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr), DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr), DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable), DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, ixl_if_queues_free), DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status), DEVMETHOD(ifdi_multi_set, ixl_if_multi_set), DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set), DEVMETHOD(ifdi_media_status, ixl_if_media_status), DEVMETHOD(ifdi_media_change, ixl_if_media_change), DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set), DEVMETHOD(ifdi_timer, ixl_if_timer), DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register), DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister), DEVMETHOD(ifdi_get_counter, ixl_if_get_counter), DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req), DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl), 
DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart), #ifdef PCI_IOV DEVMETHOD(ifdi_iov_init, ixl_if_iov_init), DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit), DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add), DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle), #endif // ifdi_led_func // ifdi_debug DEVMETHOD_END }; static driver_t ixl_if_driver = { "ixl_if", ixl_if_methods, sizeof(struct ixl_pf) }; /* ** TUNEABLE PARAMETERS: */ static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "ixl driver parameters"); #ifdef IXL_DEBUG_FC /* * Leave this on unless you need to send flow control * frames (or other control frames) from software */ static int ixl_enable_tx_fc_filter = 1; TUNABLE_INT("hw.ixl.enable_tx_fc_filter", &ixl_enable_tx_fc_filter); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, &ixl_enable_tx_fc_filter, 0, "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources"); #endif #ifdef IXL_DEBUG static int ixl_debug_recovery_mode = 0; TUNABLE_INT("hw.ixl.debug_recovery_mode", &ixl_debug_recovery_mode); SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN, &ixl_debug_recovery_mode, 0, "Act as if FW entered recovery mode (for debugging)"); #endif static int ixl_i2c_access_method = 0; TUNABLE_INT("hw.ixl.i2c_access_method", &ixl_i2c_access_method); SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN, &ixl_i2c_access_method, 0, IXL_SYSCTL_HELP_I2C_METHOD); static int ixl_enable_vf_loopback = 1; TUNABLE_INT("hw.ixl.enable_vf_loopback", &ixl_enable_vf_loopback); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN, &ixl_enable_vf_loopback, 0, IXL_SYSCTL_HELP_VF_LOOPBACK); /* * Different method for processing TX descriptor * completion. */ static int ixl_enable_head_writeback = 1; TUNABLE_INT("hw.ixl.enable_head_writeback", &ixl_enable_head_writeback); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, &ixl_enable_head_writeback, 0, "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); static int ixl_core_debug_mask = 0; TUNABLE_INT("hw.ixl.core_debug_mask", &ixl_core_debug_mask); SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, &ixl_core_debug_mask, 0, "Display debug statements that are printed in non-shared code"); static int ixl_shared_debug_mask = 0; TUNABLE_INT("hw.ixl.shared_debug_mask", &ixl_shared_debug_mask); SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, &ixl_shared_debug_mask, 0, "Display debug statements that are printed in shared code"); #if 0 /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ static int ixl_dynamic_rx_itr = 0; TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); static int ixl_dynamic_tx_itr = 0; TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); #endif static int ixl_rx_itr = IXL_ITR_8K; TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, &ixl_rx_itr, 0, "RX Interrupt Rate"); static int ixl_tx_itr = IXL_ITR_4K; TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, &ixl_tx_itr, 0, "TX Interrupt Rate"); #ifdef IXL_IW int ixl_enable_iwarp = 0; TUNABLE_INT("hw.ixl.enable_iwarp",
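/*
 * Usage sketch (illustrative values, not the defaults): the CTLFLAG_RDTUN
 * knobs above are read once from the loader environment at module load,
 * e.g. via /boot/loader.conf:
 *
 *   hw.ixl.enable_head_writeback="0"   # use TX descriptor write-back
 *   hw.ixl.core_debug_mask="0"
 *
 * After boot they remain visible, but read-only, under "sysctl hw.ixl".
 */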
&ixl_enable_iwarp); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN, &ixl_enable_iwarp, 0, "iWARP enabled"); #if __FreeBSD_version < 1100000 int ixl_limit_iwarp_msix = 1; #else int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX; #endif TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix); SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN, &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP"); #endif extern struct if_txrx ixl_txrx_hwb; extern struct if_txrx ixl_txrx_dwb; static struct if_shared_ctx ixl_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE, .isc_rx_maxsize = 16384, .isc_rx_nsegments = IXL_MAX_RX_SEGS, .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, .isc_nfl = 1, .isc_ntxqs = 1, .isc_nrxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = ixl_vendor_info_array, .isc_driver_version = IXL_DRIVER_VERSION_STRING, .isc_driver = &ixl_if_driver, .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN, .isc_nrxd_min = {IXL_MIN_RING}, .isc_ntxd_min = {IXL_MIN_RING}, .isc_nrxd_max = {IXL_MAX_RING}, .isc_ntxd_max = {IXL_MAX_RING}, .isc_nrxd_default = {IXL_DEFAULT_RING}, .isc_ntxd_default = {IXL_DEFAULT_RING}, }; if_shared_ctx_t ixl_sctx = &ixl_sctx_init; /*** Functions ***/ static void * ixl_register(device_t dev) { return (ixl_sctx); } static int ixl_allocate_pci_resources(struct ixl_pf *pf) { device_t dev = iflib_get_dev(pf->vsi.ctx); struct i40e_hw *hw = &pf->hw; int rid; /* Map BAR0 */ rid = PCIR_BAR(0); pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(pf->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); return (ENXIO); } /* Save off the PCI information */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); /* Save off register access information */ pf->osdep.mem_bus_space_tag = rman_get_bustag(pf->pci_mem); pf->osdep.mem_bus_space_handle = rman_get_bushandle(pf->pci_mem); pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); pf->osdep.flush_reg = I40E_GLGEN_STAT; pf->osdep.dev = dev; pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; pf->hw.back = &pf->osdep; return (0); } static void ixl_setup_ssctx(struct ixl_pf *pf) { if_softc_ctx_t scctx = pf->vsi.shared; struct i40e_hw *hw = &pf->hw; if (IXL_PF_IN_RECOVERY_MODE(pf)) { scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1; scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1; } else if (hw->mac.type == I40E_MAC_X722) scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128; else scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; if (pf->vsi.enable_head_writeback) { scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); scctx->isc_txrx = &ixl_txrx_hwb; } else { scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc), DBA_ALIGN); scctx->isc_txrx = &ixl_txrx_dwb; } scctx->isc_txrx->ift_legacy_intr = ixl_intr; scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union i40e_32byte_rx_desc), 
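/*
 * Worked example (assuming the stock IXL_DEFAULT_RING of 1024 descriptors
 * and a 128-byte DBA_ALIGN): 1024 RX descriptors * 32 bytes each = 32768
 * bytes, already a multiple of 128, so roundup2() leaves the size unchanged.
 */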
DBA_ALIGN); scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS; scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; scctx->isc_tx_tso_size_max = IXL_TSO_SIZE; scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE; scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size; scctx->isc_tx_csum_flags = CSUM_OFFLOAD; scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS; } static void ixl_admin_timer(void *arg) { struct ixl_pf *pf = (struct ixl_pf *)arg; /* Fire off the admin task */ iflib_admin_intr_deferred(pf->vsi.ctx); /* Reschedule the admin timer */ callout_schedule(&pf->admin_timer, hz/2); } static int ixl_attach_pre_recovery_mode(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); i40e_get_mac_addr(hw, hw->mac.addr); if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); } ixl_setup_ssctx(pf); return (0); } static int ixl_if_attach_pre(if_ctx_t ctx) { device_t dev; struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; enum i40e_get_fw_lldp_status_resp lldp_status; struct i40e_filter_control_settings filter; enum i40e_status_code status; int error = 0; dev = iflib_get_dev(ctx); pf = iflib_get_softc(ctx); INIT_DBG_DEV(dev, "begin"); vsi = &pf->vsi; vsi->back = pf; pf->dev = dev; hw = &pf->hw; vsi->dev = dev; vsi->hw = &pf->hw; vsi->id = 0; vsi->num_vlans = 0; vsi->ctx = ctx; vsi->media = iflib_get_media(ctx); vsi->shared = iflib_get_softc_ctx(ctx); snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name), "%s:admin", device_get_nameunit(dev)); mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF); callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0); /* Save tunable values */ ixl_save_pf_tunables(pf); /* Do PCI setup - map BAR0, etc */ if (ixl_allocate_pci_resources(pf)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci_res; } /* Establish a clean starting point */ i40e_clear_hw(hw); i40e_set_mac_type(hw); error = ixl_pf_reset(pf); if (error) goto err_out; /* Initialize the shared code */ status = i40e_init_shared_code(hw); if (status) { device_printf(dev, "Unable to initialize shared code, error %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } /* Set up the admin queue */ hw->aq.num_arq_entries = IXL_AQ_LEN; hw->aq.num_asq_entries = IXL_AQ_LEN; hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; status = i40e_init_adminq(hw); if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "Unable to initialize Admin Queue, error %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } ixl_print_nvm_version(pf); if (status == I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "The driver for the device stopped " "because the NVM image is newer than expected.\n"); device_printf(dev, "You must install the most recent version of " "the network driver.\n"); error = EIO; goto err_out; } if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) { device_printf(dev, "The driver for the device detected " "a newer version of the NVM image than expected.\n"); device_printf(dev, "Please install the most recent version " "of the network driver.\n"); } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) { 
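/*
 * Illustrative case: an adapter reporting FW API 1.3 takes this branch;
 * as with the newer-minor-version warning above, the driver only
 * complains here and the attach continues.
 */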
device_printf(dev, "The driver for the device detected " "an older version of the NVM image than expected.\n"); device_printf(dev, "Please update the NVM image.\n"); } if (IXL_PF_IN_RECOVERY_MODE(pf)) { error = ixl_attach_pre_recovery_mode(pf); if (error) goto err_out; return (error); } /* Clear PXE mode */ i40e_clear_pxe_mode(hw); /* Get capabilities from the device */ error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "get_hw_capabilities failed: %d\n", error); goto err_get_cap; } /* Set up host memory cache */ error = ixl_setup_hmc(pf); if (error) goto err_mac_hmc; /* Disable LLDP from the firmware for certain NVM versions */ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4)) { i40e_aq_stop_lldp(hw, true, false, NULL); pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED; } + /* Try enabling Energy Efficient Ethernet (EEE) mode */ + if (i40e_enable_eee(hw, true) == I40E_SUCCESS) + atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); + else + atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); + /* Get MAC addresses from hardware */ i40e_get_mac_addr(hw, hw->mac.addr); error = i40e_validate_mac_addr(hw->mac.addr); if (error) { device_printf(dev, "validate_mac_addr failed: %d\n", error); goto err_mac_hmc; } bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); iflib_set_mac(ctx, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); /* Set up the device filtering */ bzero(&filter, sizeof(filter)); filter.enable_ethtype = TRUE; filter.enable_macvlan = TRUE; filter.enable_fdir = FALSE; filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; if (i40e_set_filter_control(hw, &filter)) device_printf(dev, "i40e_set_filter_control() failed\n"); /* Query device FW LLDP status */ if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) { if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) { atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); } else { atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); } } /* Tell FW to apply DCB config on link up */ i40e_aq_set_dcb_parameters(hw, true, NULL); /* Fill out iflib parameters */ ixl_setup_ssctx(pf); INIT_DBG_DEV(dev, "end"); return (0); err_mac_hmc: ixl_shutdown_hmc(pf); err_get_cap: i40e_shutdown_adminq(hw); err_out: ixl_free_pci_resources(pf); err_pci_res: mtx_lock(&pf->admin_mtx); callout_stop(&pf->admin_timer); mtx_unlock(&pf->admin_mtx); mtx_destroy(&pf->admin_mtx); return (error); } static int ixl_if_attach_post(if_ctx_t ctx) { device_t dev; struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; int error = 0; enum i40e_status_code status; dev = iflib_get_dev(ctx); pf = iflib_get_softc(ctx); INIT_DBG_DEV(dev, "begin"); vsi = &pf->vsi; vsi->ifp = iflib_get_ifp(ctx); hw = &pf->hw; /* Save off determined number of queues for interface */ vsi->num_rx_queues = vsi->shared->isc_nrxqsets; vsi->num_tx_queues = vsi->shared->isc_ntxqsets; /* Setup OS network interface / ifnet */ if (ixl_setup_interface(dev, pf)) { device_printf(dev, "interface setup failed!\n"); error = EIO; goto err; } if (IXL_PF_IN_RECOVERY_MODE(pf)) { /* Keep admin queue interrupts active while driver is loaded */ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); } ixl_add_sysctls_recovery_mode(pf); /* Start the admin timer */ mtx_lock(&pf->admin_mtx); callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf); mtx_unlock(&pf->admin_mtx); return (0); } /* Determine link state */ if (ixl_attach_get_link_status(pf)) { error = EINVAL; goto err; } error = 
ixl_switch_config(pf); if (error) { device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error); goto err; } /* Add protocol filters to list */ ixl_init_filters(vsi); /* Init queue allocation manager */ error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp); if (error) { device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", error); goto err; } /* reserve a contiguous allocation for the PF's VSI */ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag); if (error) { device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", error); goto err; } device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", pf->qtag.num_allocated, pf->qtag.num_active); /* Limit PHY interrupts to link, autoneg, and module failure */ status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (status) { device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s," " aq_err %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); goto err; } /* Get the bus configuration and set the shared code */ ixl_get_bus_info(pf); /* Keep admin queue interrupts active while driver is loaded */ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); } /* Set initial advertised speed sysctl value */ ixl_set_initial_advertised_speeds(pf); /* Initialize statistics & add sysctls */ ixl_add_device_sysctls(pf); ixl_pf_reset_stats(pf); ixl_update_stats_counters(pf); ixl_add_hw_stats(pf); hw->phy.get_link_info = true; i40e_get_link_status(hw, &pf->link_up); ixl_update_link_status(pf); #ifdef PCI_IOV ixl_initialize_sriov(pf); #endif #ifdef IXL_IW if (hw->func_caps.iwarp && ixl_enable_iwarp) { pf->iw_enabled = (pf->iw_msix > 0) ?
true : false; if (pf->iw_enabled) { error = ixl_iw_pf_attach(pf); if (error) { device_printf(dev, "interfacing to iWARP driver failed: %d\n", error); goto err; } else device_printf(dev, "iWARP ready\n"); } else device_printf(dev, "iWARP disabled on this device " "(no MSI-X vectors)\n"); } else { pf->iw_enabled = false; device_printf(dev, "The device is not iWARP enabled\n"); } #endif /* Start the admin timer */ mtx_lock(&pf->admin_mtx); callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf); mtx_unlock(&pf->admin_mtx); INIT_DBG_DEV(dev, "end"); return (0); err: INIT_DEBUGOUT("end: error %d", error); /* ixl_if_detach() is called on error from this */ return (error); } /** * XXX: iflib always ignores the return value of detach() * -> This means that this isn't allowed to fail */ static int ixl_if_detach(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; #ifdef IXL_IW int error; #endif INIT_DBG_DEV(dev, "begin"); /* Stop the admin timer */ mtx_lock(&pf->admin_mtx); callout_stop(&pf->admin_timer); mtx_unlock(&pf->admin_mtx); mtx_destroy(&pf->admin_mtx); #ifdef IXL_IW if (ixl_enable_iwarp && pf->iw_enabled) { error = ixl_iw_pf_detach(pf); if (error == EBUSY) { device_printf(dev, "iwarp in use; stop it first.\n"); //return (error); } } #endif /* Remove all previously allocated media types */ ifmedia_removeall(vsi->media); /* Shutdown LAN HMC */ ixl_shutdown_hmc(pf); /* Shutdown admin queue */ ixl_disable_intr0(hw); status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, "i40e_shutdown_adminq() failed with status %s\n", i40e_stat_str(hw, status)); ixl_pf_qmgr_destroy(&pf->qmgr); ixl_free_pci_resources(pf); ixl_free_mac_filters(vsi); INIT_DBG_DEV(dev, "end"); return (0); } static int ixl_if_shutdown(if_ctx_t ctx) { int error = 0; INIT_DEBUGOUT("ixl_if_shutdown: begin"); /* TODO: Call ixl_if_stop()? */ /* TODO: Then setup low power mode */ return (error); } static int ixl_if_suspend(if_ctx_t ctx) { int error = 0; INIT_DEBUGOUT("ixl_if_suspend: begin"); /* TODO: Call ixl_if_stop()? */ /* TODO: Then setup low power mode */ return (error); } static int ixl_if_resume(if_ctx_t ctx) { struct ifnet *ifp = iflib_get_ifp(ctx); INIT_DEBUGOUT("ixl_if_resume: begin"); /* Read & clear wake-up registers */ /* Required after D3->D0 transition */ if (ifp->if_flags & IFF_UP) ixl_if_init(ctx); return (0); } void ixl_if_init(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; struct ifnet *ifp = iflib_get_ifp(ctx); device_t dev = iflib_get_dev(ctx); u8 tmpaddr[ETHER_ADDR_LEN]; int ret; if (IXL_PF_IN_RECOVERY_MODE(pf)) return; /* * If the aq is dead here, it probably means something outside of the driver * did something to the adapter, like a PF reset. * So, rebuild the driver's state here if that occurs. */ if (!i40e_check_asq_alive(&pf->hw)) { device_printf(dev, "Admin Queue is down; resetting...\n"); ixl_teardown_hw_structs(pf); ixl_rebuild_hw_structs_after_reset(pf, false); } /* Get the latest mac address... 
User might use a LAA */ bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN); if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); bcopy(tmpaddr, hw->mac.addr, ETH_ALEN); ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY, hw->mac.addr, NULL); if (ret) { device_printf(dev, "LLA address change failed!!\n"); return; } ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); } iflib_set_mac(ctx, hw->mac.addr); /* Prepare the VSI: rings, hmc contexts, etc... */ if (ixl_initialize_vsi(vsi)) { device_printf(dev, "initialize vsi failed!!\n"); return; } /* Reconfigure multicast filters in HW */ ixl_if_multi_set(ctx); /* Set up RSS */ ixl_config_rss(pf); /* Set up MSI-X routing and the ITR settings */ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { ixl_configure_queue_intr_msix(pf); ixl_configure_itr(pf); } else ixl_configure_legacy(pf); if (vsi->enable_head_writeback) ixl_init_tx_cidx(vsi); else ixl_init_tx_rsqs(vsi); ixl_enable_rings(vsi); i40e_aq_set_default_vsi(hw, vsi->seid, NULL); /* Re-add configured filters to HW */ ixl_reconfigure_filters(vsi); /* Configure promiscuous mode */ ixl_if_promisc_set(ctx, if_getflags(ifp)); #ifdef IXL_IW if (ixl_enable_iwarp && pf->iw_enabled) { ret = ixl_iw_pf_init(pf); if (ret) device_printf(dev, "initialize iwarp failed, code %d\n", ret); } #endif } void ixl_if_stop(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; INIT_DEBUGOUT("ixl_if_stop: begin\n"); if (IXL_PF_IN_RECOVERY_MODE(pf)) return; // TODO: This may need to be reworked #ifdef IXL_IW /* Stop iWARP device */ if (ixl_enable_iwarp && pf->iw_enabled) ixl_iw_pf_stop(pf); #endif ixl_disable_rings_intr(vsi); ixl_disable_rings(pf, vsi, &pf->qtag); } static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct ixl_rx_queue *rx_que = vsi->rx_queues; struct ixl_tx_queue *tx_que = vsi->tx_queues; int err, i, rid, vector = 0; char buf[16]; MPASS(vsi->shared->isc_nrxqsets > 0); MPASS(vsi->shared->isc_ntxqsets > 0); /* Admin Que must use vector 0 */ rid = vector + 1; err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, ixl_msix_adminq, pf, 0, "aq"); if (err) { iflib_irq_free(ctx, &vsi->irq); device_printf(iflib_get_dev(ctx), "Failed to register Admin Que handler"); return (err); } /* Create soft IRQ for handling VFLRs */ iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov"); /* Now set up the stations */ for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) { rid = vector + 1; snprintf(buf, sizeof(buf), "rxq%d", i); err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf); /* XXX: Does the driver work as expected if there are fewer num_rx_queues than * what's expected in the iflib context? */ if (err) { device_printf(iflib_get_dev(ctx), "Failed to allocate queue RX int vector %d, err: %d\n", i, err); vsi->num_rx_queues = i + 1; goto fail; } rx_que->msix = vector; } bzero(buf, sizeof(buf)); for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) { snprintf(buf, sizeof(buf), "txq%d", i); iflib_softirq_alloc_generic(ctx, &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq, IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); /* TODO: Maybe call a strategy function for this to figure out which * interrupts to map Tx queues to.
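 * Worked example (hypothetical queue counts): with isc_nrxqsets = 4
 * and isc_ntxqsets = 8, txq0..txq3 share MSI-X vectors 1..4 with
 * rxq0..rxq3, and txq4..txq7 wrap around onto vectors 1..4 again.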
I don't know if there's an immediately * better way than this other than a user-supplied map, though. */ tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1; } return (0); fail: iflib_irq_free(ctx, &vsi->irq); rx_que = vsi->rx_queues; for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) iflib_irq_free(ctx, &rx_que->que_irq); return (err); } /* * Enable all interrupts * * Called in: * iflib_init_locked, after ixl_if_init() */ static void ixl_if_enable_intr(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *que = vsi->rx_queues; ixl_enable_intr0(hw); /* Enable queue interrupts */ for (int i = 0; i < vsi->num_rx_queues; i++, que++) /* TODO: Queue index parameter is probably wrong */ ixl_enable_queue(hw, que->rxr.me); } /* * Disable queue interrupts * * Other interrupt causes need to remain active. */ static void ixl_if_disable_intr(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *rx_que = vsi->rx_queues; if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) ixl_disable_queue(hw, rx_que->msix - 1); } else { // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF // stops queues from triggering interrupts wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); } } static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid]; ixl_enable_queue(hw, rx_que->msix - 1); return (0); } static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid]; ixl_enable_queue(hw, tx_que->msix - 1); return (0); } static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; if_softc_ctx_t scctx = vsi->shared; struct ixl_tx_queue *que; int i, j, error = 0; MPASS(scctx->isc_ntxqsets > 0); MPASS(ntxqs == 1); MPASS(scctx->isc_ntxqsets == ntxqsets); /* Allocate queue structure memory */ if (!(vsi->tx_queues = (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); return (ENOMEM); } for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { struct tx_ring *txr = &que->txr; txr->me = i; que->vsi = vsi; if (!vsi->enable_head_writeback) { /* Allocate report status array */ if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) { device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n"); error = ENOMEM; goto fail; } /* Init report status array */ for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; } /* get the virtual and physical address of the hardware queues */ txr->tail = I40E_QTX_TAIL(txr->me); txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs]; txr->tx_paddr = paddrs[i * ntxqs]; txr->que = que; } return (0); fail: ixl_if_queues_free(ctx); return (error); } static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct ixl_rx_queue *que; int i, error = 0; #ifdef 
INVARIANTS if_softc_ctx_t scctx = vsi->shared; MPASS(scctx->isc_nrxqsets > 0); MPASS(nrxqs == 1); MPASS(scctx->isc_nrxqsets == nrxqsets); #endif /* Allocate queue structure memory */ if (!(vsi->rx_queues = (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) * nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); error = ENOMEM; goto fail; } for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) { struct rx_ring *rxr = &que->rxr; rxr->me = i; que->vsi = vsi; /* get the virtual and physical address of the hardware queues */ rxr->tail = I40E_QRX_TAIL(rxr->me); rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs]; rxr->rx_paddr = paddrs[i * nrxqs]; rxr->que = que; } return (0); fail: ixl_if_queues_free(ctx); return (error); } static void ixl_if_queues_free(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; if (!vsi->enable_head_writeback) { struct ixl_tx_queue *que; int i = 0; for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) { struct tx_ring *txr = &que->txr; if (txr->tx_rsq != NULL) { free(txr->tx_rsq, M_IXL); txr->tx_rsq = NULL; } } } if (vsi->tx_queues != NULL) { free(vsi->tx_queues, M_IXL); vsi->tx_queues = NULL; } if (vsi->rx_queues != NULL) { free(vsi->rx_queues, M_IXL); vsi->rx_queues = NULL; } if (!IXL_PF_IN_RECOVERY_MODE(pf)) sysctl_ctx_free(&vsi->sysctl_ctx); } void ixl_update_link_status(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; u64 baudrate; if (pf->link_up) { if (vsi->link_active == FALSE) { vsi->link_active = TRUE; baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed); iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate); ixl_link_up_msg(pf); #ifdef PCI_IOV ixl_broadcast_link_state(pf); #endif } } else { /* Link down */ if (vsi->link_active == TRUE) { vsi->link_active = FALSE; iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0); #ifdef PCI_IOV ixl_broadcast_link_state(pf); #endif } } } static void ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e) { device_t dev = pf->dev; u32 rxq_idx, qtx_ctl; rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >> I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT; qtx_ctl = e->desc.params.external.param1; device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx); device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl); } static int ixl_process_adminq(struct ixl_pf *pf, u16 *pending) { enum i40e_status_code status = I40E_SUCCESS; struct i40e_arq_event_info event; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u16 opcode; u32 loop = 0, reg; event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO); if (!event.msg_buf) { device_printf(dev, "%s: Unable to allocate memory for Admin" " Queue event!\n", __func__); return (ENOMEM); } /* clean and process any events */ do { status = i40e_clean_arq_element(hw, &event, pending); if (status) break; opcode = LE16_TO_CPU(event.desc.opcode); ixl_dbg(pf, IXL_DBG_AQ, "Admin Queue event: %#06x\n", opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: ixl_link_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_pf: #ifdef PCI_IOV ixl_handle_vf_msg(pf, &event); #endif break; /* * This should only occur on no-drop queues, which * aren't currently configured. 
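 * (If one ever does fire, ixl_handle_lan_overflow_event() above just
 * logs the offending global RX queue index decoded from param0's
 * RUPTQ_RXQNUM field and the raw QTX_CTL value carried in param1.)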
*/ case i40e_aqc_opc_event_lan_overflow: ixl_handle_lan_overflow_event(pf, &event); break; default: break; } } while (*pending && (loop++ < IXL_ADM_LIMIT)); free(event.msg_buf, M_IXL); /* Re-enable admin queue interrupt cause */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); return (status); } static void ixl_if_update_admin_status(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct i40e_hw *hw = &pf->hw; u16 pending; if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) ixl_handle_empr_reset(pf); /* * Admin Queue is shut down while handling reset. * Don't proceed if it hasn't been re-initialized, * e.g. due to an issue with new FW. */ if (!i40e_check_asq_alive(&pf->hw)) return; if (pf->state & IXL_PF_STATE_MDD_PENDING) ixl_handle_mdd_event(pf); ixl_process_adminq(pf, &pending); ixl_update_link_status(pf); /* * If there are still messages to process, reschedule ourselves. * Otherwise, re-enable our interrupt and go to sleep. */ if (pending > 0) iflib_admin_intr_deferred(ctx); else ixl_enable_intr0(hw); } static void ixl_if_multi_set(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; int mcnt, flags; int del_mcnt; IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR); /* Delete filters for removed multicast addresses */ del_mcnt = ixl_del_multi(vsi); vsi->num_macs -= del_mcnt; if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); return; } /* (re-)install filters for all mcast addresses */ /* XXX: This bypasses filter count tracking code! */ mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi); if (mcnt > 0) { vsi->num_macs += mcnt; flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); ixl_add_hw_filters(vsi, flags, mcnt); } ixl_dbg_filter(pf, "%s: filter mac total: %d\n", __func__, vsi->num_macs); IOCTL_DEBUGOUT("ixl_if_multi_set: end"); } static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) return (EINVAL); vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; return (0); } static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct ixl_pf *pf = iflib_get_softc(ctx); struct i40e_hw *hw = &pf->hw; INIT_DEBUGOUT("ixl_media_status: begin"); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!pf->link_up) { return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware is always full-duplex */ ifmr->ifm_active |= IFM_FDX; switch (hw->phy.link_info.phy_type) { /* 100 M */ case I40E_PHY_TYPE_100BASE_TX: ifmr->ifm_active |= IFM_100_TX; break; /* 1 G */ case I40E_PHY_TYPE_1000BASE_T: ifmr->ifm_active |= IFM_1000_T; break; case I40E_PHY_TYPE_1000BASE_SX: ifmr->ifm_active |= IFM_1000_SX; break; case I40E_PHY_TYPE_1000BASE_LX: ifmr->ifm_active |= IFM_1000_LX; break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: ifmr->ifm_active |= IFM_1000_T; + break; + /* 2.5 G */ + case I40E_PHY_TYPE_2_5GBASE_T: + ifmr->ifm_active |= IFM_2500_T; + break; + /* 5 G */ + case I40E_PHY_TYPE_5GBASE_T: + ifmr->ifm_active |= IFM_5000_T; break; /* 10 G */ case I40E_PHY_TYPE_10GBASE_SFPP_CU: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_SR:
ifmr->ifm_active |= IFM_10G_SR; break; case I40E_PHY_TYPE_10GBASE_LR: ifmr->ifm_active |= IFM_10G_LR; break; case I40E_PHY_TYPE_10GBASE_T: ifmr->ifm_active |= IFM_10G_T; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_AOC: ifmr->ifm_active |= IFM_10G_AOC; break; /* 25 G */ case I40E_PHY_TYPE_25GBASE_KR: ifmr->ifm_active |= IFM_25G_KR; break; case I40E_PHY_TYPE_25GBASE_CR: ifmr->ifm_active |= IFM_25G_CR; break; case I40E_PHY_TYPE_25GBASE_SR: ifmr->ifm_active |= IFM_25G_SR; break; case I40E_PHY_TYPE_25GBASE_LR: ifmr->ifm_active |= IFM_25G_LR; break; case I40E_PHY_TYPE_25GBASE_AOC: ifmr->ifm_active |= IFM_25G_AOC; break; case I40E_PHY_TYPE_25GBASE_ACC: ifmr->ifm_active |= IFM_25G_ACC; break; /* 40 G */ case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: ifmr->ifm_active |= IFM_40G_CR4; break; case I40E_PHY_TYPE_40GBASE_SR4: ifmr->ifm_active |= IFM_40G_SR4; break; case I40E_PHY_TYPE_40GBASE_LR4: ifmr->ifm_active |= IFM_40G_LR4; break; case I40E_PHY_TYPE_XLAUI: ifmr->ifm_active |= IFM_OTHER; break; case I40E_PHY_TYPE_1000BASE_KX: ifmr->ifm_active |= IFM_1000_KX; break; case I40E_PHY_TYPE_SGMII: ifmr->ifm_active |= IFM_1000_SGMII; break; /* ERJ: What's the difference between these? */ case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ifmr->ifm_active |= IFM_10G_CR1; break; case I40E_PHY_TYPE_10GBASE_KX4: ifmr->ifm_active |= IFM_10G_KX4; break; case I40E_PHY_TYPE_10GBASE_KR: ifmr->ifm_active |= IFM_10G_KR; break; case I40E_PHY_TYPE_SFI: ifmr->ifm_active |= IFM_10G_SFI; break; /* Our single 20G media type */ case I40E_PHY_TYPE_20GBASE_KR2: ifmr->ifm_active |= IFM_20G_KR2; break; case I40E_PHY_TYPE_40GBASE_KR4: ifmr->ifm_active |= IFM_40G_KR4; break; case I40E_PHY_TYPE_XLPPI: case I40E_PHY_TYPE_40GBASE_AOC: ifmr->ifm_active |= IFM_40G_XLPPI; break; /* Unknown to driver */ default: ifmr->ifm_active |= IFM_UNKNOWN; break; } /* Report flow control status as well */ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; } static int ixl_if_media_change(if_ctx_t ctx) { struct ifmedia *ifm = iflib_get_media(ctx); INIT_DEBUGOUT("ixl_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); return (ENODEV); } static int ixl_if_promisc_set(if_ctx_t ctx, int flags) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = iflib_get_ifp(ctx); struct i40e_hw *hw = vsi->hw; int err; bool uni = FALSE, multi = FALSE; if (flags & IFF_PROMISC) uni = multi = TRUE; else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >= MAX_MULTICAST_ADDR) multi = TRUE; err = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, uni, NULL, true); if (err) return (err); err = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, multi, NULL); return (err); } static void ixl_if_timer(if_ctx_t ctx, uint16_t qid) { struct ixl_pf *pf = iflib_get_softc(ctx); if (qid != 0) return; ixl_update_stats_counters(pf); } static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; ++vsi->num_vlans; ixl_add_filter(vsi, hw->mac.addr, vtag); } static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag) { struct ixl_pf *pf = 
iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; --vsi->num_vlans; ixl_del_filter(vsi, hw->mac.addr, vtag); } static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IPACKETS: return (vsi->ipackets); case IFCOUNTER_IERRORS: return (vsi->ierrors); case IFCOUNTER_OPACKETS: return (vsi->opackets); case IFCOUNTER_OERRORS: return (vsi->oerrors); case IFCOUNTER_COLLISIONS: /* Collisions are by standard impossible in 40G/10G Ethernet */ return (0); case IFCOUNTER_IBYTES: return (vsi->ibytes); case IFCOUNTER_OBYTES: return (vsi->obytes); case IFCOUNTER_IMCASTS: return (vsi->imcasts); case IFCOUNTER_OMCASTS: return (vsi->omcasts); case IFCOUNTER_IQDROPS: return (vsi->iqdrops); case IFCOUNTER_OQDROPS: return (vsi->oqdrops); case IFCOUNTER_NOPROTO: return (vsi->noproto); default: return (if_get_counter_default(ifp, cnt)); } } #ifdef PCI_IOV static void ixl_if_vflr_handle(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); ixl_handle_vflr(pf); } #endif static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) { struct ixl_pf *pf = iflib_get_softc(ctx); if (pf->read_i2c_byte == NULL) return (EINVAL); for (int i = 0; i < req->len; i++) if (pf->read_i2c_byte(pf, req->offset + i, req->dev_addr, &req->data[i])) return (EIO); return (0); } static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ifdrv *ifd = (struct ifdrv *)data; int error = 0; /* * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without * performing privilege checks. It is important that this function * perform the necessary checks for commands which should only be * executed by privileged threads. */ switch(command) { case SIOCGDRVSPEC: case SIOCSDRVSPEC: /* NVM update command */ if (ifd->ifd_cmd == I40E_NVM_ACCESS) { error = priv_check(curthread, PRIV_DRIVER); if (error) break; error = ixl_handle_nvmupd_cmd(pf, ifd); } else { error = EINVAL; } break; default: error = EOPNOTSUPP; } return (error); } /* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized * @ctx: iflib context * @event: event code to check * * Defaults to returning false for every event. * * @returns true if iflib needs to reinit the interface, false otherwise */ static bool ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: default: return (false); } } static u_int ixl_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused) { struct ixl_vsi *vsi = arg; ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl)); return (1); } /* * Sanity check and save off tunable values.
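 * For example (hypothetical setting): booting with hw.ixl.tx_itr="-1"
 * in loader.conf fails the range check below, so the driver prints the
 * valid range and falls back to the IXL_ITR_4K default.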
*/ static void ixl_save_pf_tunables(struct ixl_pf *pf) { device_t dev = pf->dev; /* Save tunable information */ #ifdef IXL_DEBUG_FC pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter; #endif #ifdef IXL_DEBUG pf->recovery_mode = ixl_debug_recovery_mode; #endif pf->dbg_mask = ixl_core_debug_mask; pf->hw.debug_mask = ixl_shared_debug_mask; pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback); pf->enable_vf_loopback = !!(ixl_enable_vf_loopback); #if 0 pf->dynamic_rx_itr = ixl_dynamic_rx_itr; pf->dynamic_tx_itr = ixl_dynamic_tx_itr; #endif if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0) pf->i2c_access_method = 0; else pf->i2c_access_method = ixl_i2c_access_method; if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid tx_itr value of %d set!\n", ixl_tx_itr); device_printf(dev, "tx_itr must be between %d and %d, " "inclusive\n", 0, IXL_MAX_ITR); device_printf(dev, "Using default value of %d instead\n", IXL_ITR_4K); pf->tx_itr = IXL_ITR_4K; } else pf->tx_itr = ixl_tx_itr; if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid rx_itr value of %d set!\n", ixl_rx_itr); device_printf(dev, "rx_itr must be between %d and %d, " "inclusive\n", 0, IXL_MAX_ITR); device_printf(dev, "Using default value of %d instead\n", IXL_ITR_8K); pf->rx_itr = IXL_ITR_8K; } else pf->rx_itr = ixl_rx_itr; } Index: head/sys/dev/ixl/ixl_pf.h =================================================================== --- head/sys/dev/ixl/ixl_pf.h (revision 365230) +++ head/sys/dev/ixl/ixl_pf.h (revision 365231) @@ -1,428 +1,433 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_PF_H_ #define _IXL_PF_H_ #include "i40e_dcb.h" #include "ixl.h" #include "ixl_pf_qmgr.h" #define VF_FLAG_ENABLED 0x01 #define VF_FLAG_SET_MAC_CAP 0x02 #define VF_FLAG_VLAN_CAP 0x04 #define VF_FLAG_PROMISC_CAP 0x08 #define VF_FLAG_MAC_ANTI_SPOOF 0x10 #define IXL_ICR0_CRIT_ERR_MASK \ (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \ I40E_PFINT_ICR0_ECC_ERR_MASK | \ I40E_PFINT_ICR0_PE_CRITERR_MASK) /* VF Interrupts */ #define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \ I40E_VPINT_LNKLSTN(((vector) - 1) + \ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) #define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \ I40E_VFINT_DYN_CTLN(((vector) - 1) + \ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) enum ixl_fw_mode { IXL_FW_MODE_NORMAL, IXL_FW_MODE_RECOVERY, IXL_FW_MODE_UEMPR }; enum ixl_i2c_access_method_t { IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE = 0, IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS = 1, IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD = 2, IXL_I2C_ACCESS_METHOD_AQ = 3, IXL_I2C_ACCESS_METHOD_TYPE_LENGTH = 4 }; /* Used in struct ixl_pf's state field */ enum ixl_pf_state { IXL_PF_STATE_RECOVERY_MODE = (1 << 0), IXL_PF_STATE_ADAPTER_RESETTING = (1 << 1), IXL_PF_STATE_MDD_PENDING = (1 << 2), IXL_PF_STATE_PF_RESET_REQ = (1 << 3), IXL_PF_STATE_VF_RESET_REQ = (1 << 4), IXL_PF_STATE_PF_CRIT_ERR = (1 << 5), IXL_PF_STATE_CORE_RESET_REQ = (1 << 6), IXL_PF_STATE_GLOB_RESET_REQ = (1 << 7), IXL_PF_STATE_EMP_RESET_REQ = (1 << 8), IXL_PF_STATE_FW_LLDP_DISABLED = (1 << 9), + IXL_PF_STATE_EEE_ENABLED = (1 << 10), }; #define IXL_PF_IN_RECOVERY_MODE(pf) \ ((atomic_load_acq_32(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) != 0) struct ixl_vf { struct ixl_vsi vsi; u32 vf_flags; u32 num_mdd_events; u8 mac[ETHER_ADDR_LEN]; u16 vf_num; struct virtchnl_version_info version; struct ixl_pf_qtag qtag; }; /* Physical controller structure */ struct ixl_pf { struct ixl_vsi vsi; struct i40e_hw hw; struct i40e_osdep osdep; device_t dev; struct resource *pci_mem; #ifdef IXL_IW int iw_msix; bool iw_enabled; #endif u32 state; u8 supported_speeds; struct ixl_pf_qmgr qmgr; struct ixl_pf_qtag qtag; char admin_mtx_name[16]; /* name of the admin mutex */ struct mtx admin_mtx; /* mutex to protect the admin timer */ struct callout admin_timer; /* timer to trigger admin task */ /* Tunable values */ #ifdef IXL_DEBUG_FC bool enable_tx_fc_filter; #endif #ifdef IXL_DEBUG bool recovery_mode; #endif int dynamic_rx_itr; int dynamic_tx_itr; int tx_itr; int rx_itr; int enable_vf_loopback; bool link_up; int advertised_speed; int fc; /* link flow ctrl setting */ enum ixl_dbg_mask dbg_mask; bool has_i2c; /* Misc stats maintained by the driver */ u64 admin_irq; /* Statistics from hw */ struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats_offsets; bool stat_offsets_loaded; /* I2C access methods */ enum ixl_i2c_access_method_t i2c_access_method; s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 *data); s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 data); /* SR-IOV */ struct ixl_vf *vfs; int num_vfs; uint16_t veb_seid; int vc_debug_lvl; }; /* * Defines used for NVM update ioctls. * This value is used in the Solaris tool, too. 
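 * (Worked out with ASCII 'E' = 0x45, '1' = 0x31, 'K' = 0x4b and
 * 'G' = 0x47, the expression below evaluates to 0x485f75.)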
*/ #define I40E_NVM_ACCESS \ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5) #define IXL_DEFAULT_PHY_INT_MASK \ ((~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL \ | I40E_AQ_EVENT_MEDIA_NA)) & 0x3FF) /*** Sysctl help messages; displayed with "sysctl -d" ***/ #define IXL_SYSCTL_HELP_SET_ADVERTISE \ "\nControl advertised link speed.\n" \ "Flags:\n" \ "\t 0x1 - advertise 100M\n" \ "\t 0x2 - advertise 1G\n" \ "\t 0x4 - advertise 10G\n" \ "\t 0x8 - advertise 20G\n" \ "\t0x10 - advertise 25G\n" \ -"\t0x20 - advertise 40G\n\n" \ +"\t0x20 - advertise 40G\n" \ +"\t0x40 - advertise 2.5G\n" \ +"\t0x80 - advertise 5G\n\n" \ "Set to 0 to disable link.\n" \ "Use \"sysctl -x\" to view flags properly." #define IXL_SYSCTL_HELP_SUPPORTED_SPEED \ "\nSupported link speeds.\n" \ "Flags:\n" \ "\t 0x1 - 100M\n" \ "\t 0x2 - 1G\n" \ "\t 0x4 - 10G\n" \ "\t 0x8 - 20G\n" \ "\t0x10 - 25G\n" \ -"\t0x20 - 40G\n\n" \ +"\t0x20 - 40G\n" \ +"\t0x40 - 2.5G\n" \ +"\t0x80 - 5G\n\n" \ "Use \"sysctl -x\" to view flags properly." #define IXL_SYSCTL_HELP_FC \ "\nSet flow control mode using the values below.\n" \ "\t0 - off\n" \ "\t1 - rx pause\n" \ "\t2 - tx pause\n" \ "\t3 - tx and rx pause" #define IXL_SYSCTL_HELP_LINK_STATUS \ "\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \ " the response." #define IXL_SYSCTL_HELP_FW_LLDP \ "\nFW LLDP engine:\n" \ "\t0 - disable\n" \ "\t1 - enable\n" #define IXL_SYSCTL_HELP_READ_I2C \ "\nRead a byte from I2C bus\n" \ "Input: 32-bit value\n" \ "\tbits 0-7: device address (0xA0 or 0xA2)\n" \ "\tbits 8-15: offset (0-255)\n" \ "\tbits 16-31: unused\n" \ "Output: 8-bit value read" #define IXL_SYSCTL_HELP_WRITE_I2C \ "\nWrite a byte to the I2C bus\n" \ "Input: 32-bit value\n" \ "\tbits 0-7: device address (0xA0 or 0xA2)\n" \ "\tbits 8-15: offset (0-255)\n" \ "\tbits 16-23: value to write\n" \ "\tbits 24-31: unused\n" \ "Output: 8-bit value written" #define IXL_SYSCTL_HELP_I2C_METHOD \ "\nI2C access method that driver will use:\n" \ "\t0 - best available method\n" \ "\t1 - bit bang via I2CPARAMS register\n" \ "\t2 - register read/write via I2CCMD register\n" \ "\t3 - Use Admin Queue command (best)\n" \ "Using the Admin Queue is only supported on 710 devices with FW version 1.7 or higher" #define IXL_SYSCTL_HELP_VF_LOOPBACK \ "\nDetermines mode that embedded device switch will use when SR-IOV is initialized:\n" \ "\t0 - Disable (VEPA)\n" \ "\t1 - Enable (VEB)\n" \ "Enabling this will allow VFs in separate VMs to communicate over the hardware bridge." MALLOC_DECLARE(M_IXL); /*** Functions / Macros ***/ /* Adjust the level here to 10 or over to print stats messages */ #define I40E_VC_DEBUG(p, level, ...) \ do { \ if (level < 10) \ ixl_dbg(p, IXL_DBG_IOV_VC, ##__VA_ARGS__); \ } while (0) #define i40e_send_vf_nack(pf, vf, op, st) \ ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__) /* Debug printing */ #define ixl_dbg(pf, m, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, m, s, ##__VA_ARGS__) #define ixl_dbg_info(pf, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_INFO, s, ##__VA_ARGS__) #define ixl_dbg_filter(pf, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_FILTER, s, ##__VA_ARGS__) #define ixl_dbg_iov(pf, s, ...) 
ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_IOV, s, ##__VA_ARGS__) /* PF-only function declarations */ int ixl_setup_interface(device_t, struct ixl_pf *); void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *); void ixl_handle_que(void *context, int pending); void ixl_init(void *); void ixl_local_timer(void *); void ixl_register_vlan(void *, struct ifnet *, u16); void ixl_unregister_vlan(void *, struct ifnet *, u16); int ixl_intr(void *); int ixl_msix_que(void *); int ixl_msix_adminq(void *); void ixl_do_adminq(void *, int); int ixl_res_alloc_cmp(const void *, const void *); const char * ixl_switch_res_type_string(u8); void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_hw_port_stats *); void ixl_media_status(struct ifnet *, struct ifmediareq *); int ixl_media_change(struct ifnet *); int ixl_ioctl(struct ifnet *, u_long, caddr_t); void ixl_enable_queue(struct i40e_hw *, int); void ixl_disable_queue(struct i40e_hw *, int); void ixl_enable_intr0(struct i40e_hw *); void ixl_disable_intr0(struct i40e_hw *); void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf); void ixl_stat_update48(struct i40e_hw *, u32, u32, bool, u64 *, u64 *); void ixl_stat_update32(struct i40e_hw *, u32, bool, u64 *, u64 *); void ixl_stop(struct ixl_pf *); void ixl_vsi_add_sysctls(struct ixl_vsi *, const char *, bool); int ixl_get_hw_capabilities(struct ixl_pf *); void ixl_link_up_msg(struct ixl_pf *); void ixl_update_link_status(struct ixl_pf *); int ixl_setup_stations(struct ixl_pf *); int ixl_switch_config(struct ixl_pf *); void ixl_stop_locked(struct ixl_pf *); int ixl_teardown_hw_structs(struct ixl_pf *); void ixl_init_locked(struct ixl_pf *); void ixl_set_rss_key(struct ixl_pf *); void ixl_set_rss_pctypes(struct ixl_pf *); void ixl_set_rss_hlut(struct ixl_pf *); int ixl_setup_adminq_msix(struct ixl_pf *); int ixl_setup_adminq_tq(struct ixl_pf *); void ixl_teardown_adminq_msix(struct ixl_pf *); void ixl_configure_intr0_msix(struct ixl_pf *); void ixl_configure_queue_intr_msix(struct ixl_pf *); void ixl_free_adminq_tq(struct ixl_pf *); int ixl_setup_legacy(struct ixl_pf *); int ixl_init_msix(struct ixl_pf *); void ixl_configure_tx_itr(struct ixl_pf *); void ixl_configure_rx_itr(struct ixl_pf *); void ixl_configure_itr(struct ixl_pf *); void ixl_configure_legacy(struct ixl_pf *); void ixl_free_pci_resources(struct ixl_pf *); void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *); void ixl_config_rss(struct ixl_pf *); int ixl_set_advertised_speeds(struct ixl_pf *, int, bool); void ixl_set_initial_advertised_speeds(struct ixl_pf *); void ixl_print_nvm_version(struct ixl_pf *pf); void ixl_add_sysctls_recovery_mode(struct ixl_pf *); void ixl_add_device_sysctls(struct ixl_pf *); void ixl_handle_mdd_event(struct ixl_pf *); void ixl_add_hw_stats(struct ixl_pf *); void ixl_update_stats_counters(struct ixl_pf *); void ixl_pf_reset_stats(struct ixl_pf *); void ixl_get_bus_info(struct ixl_pf *pf); int ixl_aq_get_link_status(struct ixl_pf *, struct i40e_aqc_get_link_status *); int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *); int ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *, struct ifreq *ifr); int ixl_setup_hmc(struct ixl_pf *); void ixl_shutdown_hmc(struct ixl_pf *); void ixl_handle_empr_reset(struct ixl_pf *); int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up); int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up); int ixl_pf_reset(struct ixl_pf *); void ixl_set_queue_rx_itr(struct ixl_rx_queue *); void 
ixl_set_queue_tx_itr(struct ixl_tx_queue *); void ixl_add_filter(struct ixl_vsi *, const u8 *, s16 vlan); void ixl_del_filter(struct ixl_vsi *, const u8 *, s16 vlan); void ixl_reconfigure_filters(struct ixl_vsi *vsi); int ixl_disable_rings(struct ixl_pf *, struct ixl_vsi *, struct ixl_pf_qtag *); int ixl_disable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_disable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16); int ixl_enable_rings(struct ixl_vsi *); int ixl_enable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16); void ixl_update_eth_stats(struct ixl_vsi *); void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); int ixl_initialize_vsi(struct ixl_vsi *); void ixl_add_ifmedia(struct ifmedia *, u64); int ixl_setup_queue_msix(struct ixl_vsi *); int ixl_setup_queue_tqs(struct ixl_vsi *); int ixl_teardown_queue_msix(struct ixl_vsi *); void ixl_free_queue_tqs(struct ixl_vsi *); void ixl_enable_intr(struct ixl_vsi *); void ixl_disable_rings_intr(struct ixl_vsi *); void ixl_set_promisc(struct ixl_vsi *); void ixl_add_multi(struct ixl_vsi *); int ixl_del_multi(struct ixl_vsi *); void ixl_setup_vlan_filters(struct ixl_vsi *); void ixl_init_filters(struct ixl_vsi *); void ixl_add_hw_filters(struct ixl_vsi *, int, int); void ixl_del_hw_filters(struct ixl_vsi *, int); void ixl_del_default_hw_filters(struct ixl_vsi *); struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *, const u8 *, s16); void ixl_add_mc_filter(struct ixl_vsi *, u8 *); void ixl_free_mac_filters(struct ixl_vsi *vsi); void ixl_update_vsi_stats(struct ixl_vsi *); void ixl_vsi_reset_stats(struct ixl_vsi *); void ixl_vsi_free_queues(struct ixl_vsi *vsi); void ixl_if_init(if_ctx_t ctx); void ixl_if_stop(if_ctx_t ctx); /* * I2C Function prototypes */ int ixl_find_i2c_interface(struct ixl_pf *); s32 ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 *data); s32 ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 data); s32 ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 *data); s32 ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 data); s32 ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 *data); s32 ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 data); u64 ixl_max_aq_speed_to_value(u8); int ixl_attach_get_link_status(struct ixl_pf *); int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS); #endif /* _IXL_PF_H_ */ Index: head/sys/dev/ixl/ixl_pf_main.c =================================================================== --- head/sys/dev/ixl/ixl_pf_main.c (revision 365230) +++ head/sys/dev/ixl/ixl_pf_main.c (revision 365231) @@ -1,4219 +1,4317 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf.h" #ifdef PCI_IOV #include "ixl_pf_iov.h" #endif #ifdef IXL_IW #include "ixl_iw.h" #include "ixl_iw_int.h" #endif static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool); static const char * ixl_link_speed_string(enum i40e_aq_link_speed); static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int); static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int); static char * ixl_switch_element_string(struct sbuf *, u8, u16); static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *); /* Sysctls */ static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS); + /* Debug Sysctls */ static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); /* Debug Sysctls */ static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS); #ifdef IXL_DEBUG static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); static int 
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); #endif #ifdef IXL_IW extern int ixl_enable_iwarp; extern int ixl_limit_iwarp_msix; #endif static const char * const ixl_fc_string[6] = { "None", "Rx", "Tx", "Full", "Priority", "Default" }; static char *ixl_fec_string[3] = { "CL108 RS-FEC", "CL74 FC-FEC/BASE-R", "None" }; MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); /* ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string */ void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf) { u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24); u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF); u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF); sbuf_printf(buf, "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> IXL_NVM_VERSION_HI_SHIFT, (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> IXL_NVM_VERSION_LO_SHIFT, hw->nvm.eetrack, oem_ver, oem_build, oem_patch); } void ixl_print_nvm_version(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *sbuf; sbuf = sbuf_new_auto(); ixl_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); device_printf(dev, "%s\n", sbuf_data(sbuf)); sbuf_delete(sbuf); } /** * ixl_get_fw_mode - Check the state of FW * @pf: PF structure * * Identify the state of the FW. It might be in a recovery mode * which limits functionality and requires special handling * from the driver. * * @returns FW mode (normal, recovery, unexpected EMP reset) */ static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL; u32 fwsts; #ifdef IXL_DEBUG if (pf->recovery_mode) return IXL_FW_MODE_RECOVERY; #endif fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; /* Is set and has one of expected values */ if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK && fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) || fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK) fw_mode = IXL_FW_MODE_RECOVERY; else { if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 && fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10) fw_mode = IXL_FW_MODE_UEMPR; } return (fw_mode); } /** * ixl_pf_reset - Reset the PF * @pf: PF structure * * Ensure that FW is in the right state and do the reset * if needed. * * @returns zero on success, or an error code on failure. */ int ixl_pf_reset(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; enum i40e_status_code status; enum ixl_fw_mode fw_mode; fw_mode = ixl_get_fw_mode(pf); ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode); if (fw_mode == IXL_FW_MODE_RECOVERY) { atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); /* Don't try to reset device if it's in recovery mode */ return (0); } status = i40e_pf_reset(hw); if (status == I40E_SUCCESS) return (0); /* Check FW mode again in case it has changed while * waiting for reset to complete */ fw_mode = ixl_get_fw_mode(pf); ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode); if (fw_mode == IXL_FW_MODE_RECOVERY) { atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); return (0); } if (fw_mode == IXL_FW_MODE_UEMPR) device_printf(pf->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); else device_printf(pf->dev, "PF reset failure %s\n", i40e_stat_str(hw, status)); return (EIO); } /** * ixl_setup_hmc - Setup LAN Host Memory Cache * @pf: PF structure * * Init and configure LAN Host Memory Cache * * @returns 0 on success, EIO on error */ int ixl_setup_hmc(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; enum i40e_status_code status; status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (status) { device_printf(pf->dev, "init_lan_hmc failed: %s\n", i40e_stat_str(hw, status)); return (EIO); } status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (status) { device_printf(pf->dev, "configure_lan_hmc failed: %s\n", i40e_stat_str(hw, status)); return (EIO); } return (0); } /** * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache * @pf: PF structure * * Shutdown Host Memory Cache if configured. * */ void ixl_shutdown_hmc(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; enum i40e_status_code status; /* HMC not configured, no need to shutdown */ if (hw->hmc.hmc_obj == NULL) return; status = i40e_shutdown_lan_hmc(hw); if (status) device_printf(pf->dev, "Shutdown LAN HMC failed with code %s\n", i40e_stat_str(hw, status)); } /* * Write PF ITR values to queue ITR registers. */ void ixl_configure_itr(struct ixl_pf *pf) { ixl_configure_tx_itr(pf); ixl_configure_rx_itr(pf); } /********************************************************************* * * Get the hardware capabilities * **********************************************************************/ int ixl_get_hw_capabilities(struct ixl_pf *pf) { struct i40e_aqc_list_capabilities_element_resp *buf; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; int len, i2c_intfc_num; bool again = TRUE; u16 needed; if (IXL_PF_IN_RECOVERY_MODE(pf)) { hw->func_caps.iwarp = 0; return (0); } len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); retry: if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate cap memory\n"); return (ENOMEM); } /* This populates the hw struct */ status = i40e_aq_discover_capabilities(hw, buf, len, &needed, i40e_aqc_opc_list_func_capabilities, NULL); free(buf, M_DEVBUF); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && (again == TRUE)) { /* retry once with a larger buffer */ again = FALSE; len = needed; goto retry; } else if (status != I40E_SUCCESS) { device_printf(dev, "capability discovery failed; status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (ENODEV); } /* * Some devices have both MDIO and I2C; since this isn't reported * by the FW, check registers to see if an I2C interface exists. 
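 * The switch statement below then picks matching access routines: AQ-based reads/writes when the firmware reports I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE, the I2CCMD register interface, or bit-banging through I2CPARAMS, depending on the configured i2c_access_method.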
*/ i2c_intfc_num = ixl_find_i2c_interface(pf); if (i2c_intfc_num != -1) pf->has_i2c = true; /* Determine functions to use for driver I2C accesses */ switch (pf->i2c_access_method) { case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: { if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { pf->read_i2c_byte = ixl_read_i2c_byte_aq; pf->write_i2c_byte = ixl_write_i2c_byte_aq; } else { pf->read_i2c_byte = ixl_read_i2c_byte_reg; pf->write_i2c_byte = ixl_write_i2c_byte_reg; } break; } case IXL_I2C_ACCESS_METHOD_AQ: pf->read_i2c_byte = ixl_read_i2c_byte_aq; pf->write_i2c_byte = ixl_write_i2c_byte_aq; break; case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD: pf->read_i2c_byte = ixl_read_i2c_byte_reg; pf->write_i2c_byte = ixl_write_i2c_byte_reg; break; case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS: pf->read_i2c_byte = ixl_read_i2c_byte_bb; pf->write_i2c_byte = ixl_write_i2c_byte_bb; break; default: /* Should not happen */ device_printf(dev, "Error setting I2C access functions\n"); break; } /* Print a subset of the capability information. */ device_printf(dev, "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n", hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, (hw->func_caps.mdio_port_mode == 2) ? "I2C" : (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" : (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : "MDIO shared"); return (0); } /* For the set_advertise sysctl */ void ixl_set_initial_advertised_speeds(struct ixl_pf *pf) { device_t dev = pf->dev; int err; /* Make sure to initialize the device to the complete list of * supported speeds on driver load, to ensure unloading and * reloading the driver will restore this value. */ err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); if (err) { /* Non-fatal error */ device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", __func__, err); return; } pf->advertised_speed = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); } int ixl_teardown_hw_structs(struct ixl_pf *pf) { enum i40e_status_code status = 0; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; /* Shutdown LAN HMC */ if (hw->hmc.hmc_obj) { status = i40e_shutdown_lan_hmc(hw); if (status) { device_printf(dev, "init: LAN HMC shutdown failure; status %s\n", i40e_stat_str(hw, status)); goto err_out; } } /* Shutdown admin queue */ ixl_disable_intr0(hw); status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, "init: Admin Queue shutdown failure; status %s\n", i40e_stat_str(hw, status)); ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag); err_out: return (status); } static u_int ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct ixl_vsi *vsi = arg; ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl)); return (1); } /********************************************************************* * Filter Routines * * Routines for multicast and vlan filter management. * *********************************************************************/ void ixl_add_multi(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int mcnt = 0, flags; IOCTL_DEBUGOUT("ixl_add_multi: begin"); /* ** First just get a count, to decide whether we ** should simply use multicast promiscuous mode. 
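** If the count reaches MAX_MULTICAST_ADDR, we skip per-address ** filters entirely and enable multicast promiscuous mode on the ** VSI; otherwise each address is added as an individual HW filter.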
*/ mcnt = if_llmaddr_count(ifp); if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete existing MC filters */ ixl_del_hw_filters(vsi, mcnt); i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); return; } mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi); if (mcnt > 0) { flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); ixl_add_hw_filters(vsi, flags, mcnt); } IOCTL_DEBUGOUT("ixl_add_multi: end"); } static u_int ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct ixl_mac_filter *f = arg; if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl))) return (1); else return (0); } int ixl_del_multi(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct ixl_mac_filter *f; int mcnt = 0; IOCTL_DEBUGOUT("ixl_del_multi: begin"); /* Search for removed multicast addresses */ SLIST_FOREACH(f, &vsi->ftl, next) if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC) && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) { f->flags |= IXL_FILTER_DEL; mcnt++; } if (mcnt > 0) ixl_del_hw_filters(vsi, mcnt); return (mcnt); } void ixl_link_up_msg(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ifnet *ifp = pf->vsi.ifp; char *req_fec_string, *neg_fec_string; u8 fec_abilities; fec_abilities = hw->phy.link_info.req_fec_info; /* If both RS and KR are requested, only show RS */ if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) req_fec_string = ixl_fec_string[0]; else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) req_fec_string = ixl_fec_string[1]; else req_fec_string = ixl_fec_string[2]; if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) neg_fec_string = ixl_fec_string[0]; else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) neg_fec_string = ixl_fec_string[1]; else neg_fec_string = ixl_fec_string[2]; log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", ifp->if_xname, ixl_link_speed_string(hw->phy.link_info.link_speed), req_fec_string, neg_fec_string, (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? ixl_fc_string[1] : ixl_fc_string[0]); } /* * Configure admin queue/misc interrupt cause registers in hardware. */ void ixl_configure_intr0_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg; /* First set up the adminq - vector 0 */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ rd32(hw, I40E_PFINT_ICR0); /* read to clear */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* * 0x7FF is the end of the queue list. * This means we won't use MSI-X vector 0 for a queue interrupt * in MSI-X mode. */ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. 
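 * In general, to request an interval of N usecs, write N/2 to the * register (124 usecs -> 124/2 = 62 = 0x3E).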
*/ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); wr32(hw, I40E_PFINT_STAT_CTL0, 0); } void ixl_add_ifmedia(struct ifmedia *media, u64 phy_types) { /* Display supported media types */ if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX)) ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T)) ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX)) ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX)) ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL); + if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T)) + ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL); + + if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T)) + ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL); + if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) || phy_types & (I40E_CAP_PHY_TYPE_XFI) || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU)) ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR)) ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR)) ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T)) ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) || phy_types & (I40E_CAP_PHY_TYPE_XLAUI) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4)) ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4)) ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC)) ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC)) ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL); } /********************************************************************* * * Get 
Firmware Switch configuration * - this will need to be more robust when more complex * switch configurations are enabled. * **********************************************************************/ int ixl_switch_config(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = iflib_get_dev(vsi->ctx); struct i40e_aqc_get_switch_config_resp *sw_config; u8 aq_buf[I40E_AQ_LARGE_BUF]; int ret; u16 next = 0; memset(&aq_buf, 0, sizeof(aq_buf)); sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; ret = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (ret) { device_printf(dev, "aq_get_switch_config() failed, error %d," " aq_error %d\n", ret, pf->hw.aq.asq_last_status); return (ret); } if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) { device_printf(dev, "Switch config: header reported: %d in structure, %d total\n", LE16_TO_CPU(sw_config->header.num_reported), LE16_TO_CPU(sw_config->header.num_total)); for (int i = 0; i < LE16_TO_CPU(sw_config->header.num_reported); i++) { device_printf(dev, "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i, sw_config->element[i].element_type, LE16_TO_CPU(sw_config->element[i].seid), LE16_TO_CPU(sw_config->element[i].uplink_seid), LE16_TO_CPU(sw_config->element[i].downlink_seid)); } } /* Simplified due to a single VSI */ vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid); vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid); vsi->seid = LE16_TO_CPU(sw_config->element[0].seid); return (ret); } void ixl_free_mac_filters(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; while (!SLIST_EMPTY(&vsi->ftl)) { f = SLIST_FIRST(&vsi->ftl); SLIST_REMOVE_HEAD(&vsi->ftl, next); free(f, M_DEVBUF); } vsi->num_hw_filters = 0; } void ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls) { struct sysctl_oid *tree; struct sysctl_oid_list *child; struct sysctl_oid_list *vsi_list; tree = device_get_sysctl_tree(vsi->dev); child = SYSCTL_CHILDREN(tree); vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name, CTLFLAG_RD, NULL, "VSI Number"); vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats); if (queues_sysctls) ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx); } /* * Used to set the Tx ITR value for all of the PF LAN VSI's queues. * Writes to the ITR registers immediately. */ static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int error = 0; int requested_tx_itr; requested_tx_itr = pf->tx_itr; error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->dynamic_tx_itr) { device_printf(dev, "Cannot set TX itr value while dynamic TX itr is enabled\n"); return (EINVAL); } if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid TX itr value; value must be between 0 and %d\n", IXL_MAX_ITR); return (EINVAL); } pf->tx_itr = requested_tx_itr; ixl_configure_tx_itr(pf); return (error); } /* * Used to set the Rx ITR value for all of the PF LAN VSI's queues. * Writes to the ITR registers immediately. 
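 * For example (assuming unit 0), something like 'sysctl dev.ixl.0.rx_itr=62' * takes effect on every RX queue at once; the sysctl node itself is * registered in ixl_add_device_sysctls() below.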
*/ static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int error = 0; int requested_rx_itr; requested_rx_itr = pf->rx_itr; error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->dynamic_rx_itr) { device_printf(dev, "Cannot set RX itr value while dynamic RX itr is enabled\n"); return (EINVAL); } if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid RX itr value; value must be between 0 and %d\n", IXL_MAX_ITR); return (EINVAL); } pf->rx_itr = requested_rx_itr; ixl_configure_rx_itr(pf); return (error); } void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_hw_port_stats *stats) { struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics"); struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); struct i40e_eth_stats *eth_stats = &stats->eth; ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); struct ixl_sysctl_info ctls[] = { {&stats->crc_errors, "crc_errors", "CRC Errors"}, {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, /* Packet Reception Stats */ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, /* Packet Transmission Stats */ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, /* Flow control */ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, /* End */ {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } } void ixl_set_rss_key(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; enum 
i40e_status_code status; #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #else ixl_get_default_rss_key(rss_seed); #endif /* Fill out hash function seed */ if (hw->mac.type == I40E_MAC_X722) { struct i40e_aqc_get_set_rss_key_data key_data; bcopy(rss_seed, &key_data, 52); status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); if (status) device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); } } /* * Configure enabled PCTYPES for RSS. */ void ixl_set_rss_pctypes(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u64 set_hena = 0, hena; #ifdef RSS u32 rss_hash_config; rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); #else if (hw->mac.type == I40E_MAC_X722) set_hena = IXL_DEFAULT_RSS_HENA_X722; else set_hena = IXL_DEFAULT_RSS_HENA_XL710; #endif hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); hena |= set_hena; i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); } /* ** Setup the PF's RSS parameters. */ void ixl_config_rss(struct ixl_pf *pf) { ixl_set_rss_key(pf); ixl_set_rss_pctypes(pf); ixl_set_rss_hlut(pf); } /* * In some firmware versions there is a default MAC/VLAN filter * configured which interferes with the filters managed by the driver. * Make sure it's removed. */ void ixl_del_default_hw_filters(struct ixl_vsi *vsi) { struct i40e_aqc_remove_macvlan_element_data e; bzero(&e, sizeof(e)); bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); e.vlan_tag = 0; e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); bzero(&e, sizeof(e)); bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); e.vlan_tag = 0; e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); } /* ** Initialize filter list and add filters that the hardware ** needs to know about. ** ** Requires VSI's seid to be set before calling. */ void ixl_init_filters(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; ixl_dbg_filter(pf, "%s: start\n", __func__); /* Initialize mac filter list for VSI */ SLIST_INIT(&vsi->ftl); vsi->num_hw_filters = 0; /* Receive broadcast Ethernet frames */ i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL); if (IXL_VSI_IS_VF(vsi)) return; ixl_del_default_hw_filters(vsi); ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY); /* * Prevent Tx flow control frames from being sent out by * non-firmware transmitters. * This affects every VSI in the PF. 
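 * When IXL_DEBUG_FC is defined, the drop filter is only installed if * the enable_tx_fc_filter tunable is set, so debug builds can let * software-generated pause frames through.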
*/ #ifndef IXL_DEBUG_FC i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); #else if (pf->enable_tx_fc_filter) i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); #endif } /* ** This routine adds multicast filters */ void ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) { struct ixl_mac_filter *f; /* Does one already exist? */ f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (f != NULL) return; f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY); if (f != NULL) f->flags |= IXL_FILTER_MC; else printf("WARNING: no filter available!!\n"); } void ixl_reconfigure_filters(struct ixl_vsi *vsi) { ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs); } /* * This routine adds a MAC/VLAN filter to the software filter * list, then programs the new filter into the HW; if the filter * already exists in the SW list, nothing needs to be done. */ void ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f, *tmp; struct ixl_pf *pf; device_t dev; pf = vsi->back; dev = pf->dev; ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n", MAC_FORMAT_ARGS(macaddr), vlan); /* Does one already exist? */ f = ixl_find_filter(vsi, macaddr, vlan); if (f != NULL) return; /* ** Is this the first vlan being registered? If so, we ** need to remove the ANY filter that indicates we are ** not in a vlan, and replace that with a 0 filter. */ if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (tmp != NULL) { ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); ixl_add_filter(vsi, macaddr, 0); } } f = ixl_new_filter(vsi, macaddr, vlan); if (f == NULL) { device_printf(dev, "WARNING: no filter available!!\n"); return; } if (f->vlan != IXL_VLAN_ANY) f->flags |= IXL_FILTER_VLAN; else vsi->num_macs++; f->flags |= IXL_FILTER_USED; ixl_add_hw_filters(vsi, f->flags, 1); } void ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; ixl_dbg_filter((struct ixl_pf *)vsi->back, "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n", MAC_FORMAT_ARGS(macaddr), vlan); f = ixl_find_filter(vsi, macaddr, vlan); if (f == NULL) return; f->flags |= IXL_FILTER_DEL; ixl_del_hw_filters(vsi, 1); if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0) vsi->num_macs--; /* Check if this is the last vlan removal */ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { /* Switch back to a non-vlan filter */ ixl_del_filter(vsi, macaddr, 0); ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); } return; } /* ** Find the filter with both matching mac addr and vlan id */ struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; SLIST_FOREACH(f, &vsi->ftl, next) { if ((cmp_etheraddr(f->macaddr, macaddr) != 0) && (f->vlan == vlan)) { return (f); } } return (NULL); } /* ** This routine takes additions to the vsi filter ** table and creates an Admin Queue call to create ** the filters in the hardware. 
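** Only filters whose flag bits contain all of the bits in 'flags' ** are picked up, and at most 'cnt' of them are pushed to the HW in ** a single i40e_aq_add_macvlan() call.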
*/ void ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) { struct i40e_aqc_add_macvlan_element_data *a, *b; struct ixl_mac_filter *f; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; enum i40e_status_code status; int j = 0; pf = vsi->back; dev = vsi->dev; hw = &pf->hw; ixl_dbg_filter(pf, "ixl_add_hw_filters: flags: %d cnt: %d\n", flags, cnt); if (cnt < 1) { ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n"); return; } a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (a == NULL) { device_printf(dev, "add_hw_filters failed to get memory\n"); return; } /* ** Scan the filter list, each time we find one ** we add it to the admin queue array and turn off ** the add bit. */ SLIST_FOREACH(f, &vsi->ftl, next) { if ((f->flags & flags) == flags) { b = &a[j]; // a pox on fvl long names :) bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); if (f->vlan == IXL_VLAN_ANY) { b->vlan_tag = 0; b->flags = CPU_TO_LE16( I40E_AQC_MACVLAN_ADD_IGNORE_VLAN); } else { b->vlan_tag = CPU_TO_LE16(f->vlan); b->flags = 0; } b->flags |= CPU_TO_LE16( I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); f->flags &= ~IXL_FILTER_ADD; j++; ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n", MAC_FORMAT_ARGS(f->macaddr)); } if (j == cnt) break; } if (j > 0) { status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); if (status) device_printf(dev, "i40e_aq_add_macvlan status %s, " "error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); else vsi->num_hw_filters += j; } free(a, M_DEVBUF); return; } /* ** This routine takes removals in the vsi filter ** table and creates an Admin Queue call to delete ** the filters in the hardware. */ void ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) { struct i40e_aqc_remove_macvlan_element_data *d, *e; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; struct ixl_mac_filter *f, *f_temp; enum i40e_status_code status; int j = 0; pf = vsi->back; hw = &pf->hw; dev = vsi->dev; ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt); d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { device_printf(dev, "%s: failed to get memory\n", __func__); return; } SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { if (f->flags & IXL_FILTER_DEL) { e = &d[j]; // a pox on fvl long names :) bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; if (f->vlan == IXL_VLAN_ANY) { e->vlan_tag = 0; e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; } else { e->vlan_tag = f->vlan; } ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n", MAC_FORMAT_ARGS(f->macaddr)); /* delete entry from vsi list */ SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); free(f, M_DEVBUF); j++; } if (j == cnt) break; } if (j > 0) { status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); if (status) { int sc = 0; for (int i = 0; i < j; i++) sc += (!d[i].error_code); vsi->num_hw_filters -= sc; device_printf(dev, "Failed to remove %d/%d filters, error %s\n", j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status)); } else vsi->num_hw_filters -= j; } free(d, M_DEVBUF); ixl_dbg_filter(pf, "%s: end\n", __func__); return; } int ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Enabling PF TX ring %4d / VSI TX ring %4d...\n", pf_qidx, vsi_qidx); i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE); reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 
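/* Request the enable via QENA_REQ; QENA_STAT is then polled below until the HW reports the queue as actually enabled. */ 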
reg |= I40E_QTX_ENA_QENA_REQ_MASK | I40E_QTX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QTX_ENA(pf_qidx), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); if (reg & I40E_QTX_ENA_QENA_STAT_MASK) break; i40e_usec_delay(10); } if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "TX queue %d still disabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Enabling PF RX ring %4d / VSI RX ring %4d...\n", pf_qidx, vsi_qidx); reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); reg |= I40E_QRX_ENA_QENA_REQ_MASK | I40E_QRX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QRX_ENA(pf_qidx), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); if (reg & I40E_QRX_ENA_QENA_STAT_MASK) break; i40e_usec_delay(10); } if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "RX queue %d still disabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { int error = 0; error = ixl_enable_tx_ring(pf, qtag, vsi_qidx); /* Called function already prints error message */ if (error) return (error); error = ixl_enable_rx_ring(pf, qtag, vsi_qidx); return (error); } /* * Returns error on first ring that is detected hung. */ int ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Disabling PF TX ring %4d / VSI TX ring %4d...\n", pf_qidx, vsi_qidx); i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE); i40e_usec_delay(500); reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QTX_ENA(pf_qidx), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "TX queue %d still enabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } /* * Returns error on first ring that is detected hung. 
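 * Here "hung" means the QENA_STAT bit failed to clear within the * roughly 100 ms polling window, in which case ETIMEDOUT is returned.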
*/ int ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Disabling PF RX ring %4d / VSI RX ring %4d...\n", pf_qidx, vsi_qidx); reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_qidx), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "RX queue %d still enabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { int error = 0; error = ixl_disable_tx_ring(pf, qtag, vsi_qidx); /* Called function already prints error message */ if (error) return (error); error = ixl_disable_rx_ring(pf, qtag, vsi_qidx); return (error); } static void ixl_handle_tx_mdd_event(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct ixl_vf *vf; bool mdd_detected = false; bool pf_mdd_detected = false; bool vf_mdd_detected = false; u16 vf_num, queue; u8 pf_num, event; u8 pf_mdet_num, vp_mdet_num; u32 reg; /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } if (!mdd_detected) return; reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); pf_mdet_num = hw->pf_id; pf_mdd_detected = true; } /* Check if MDD was caused by a VF */ for (int i = 0; i < pf->num_vfs; i++) { vf = &(pf->vfs[i]); reg = rd32(hw, I40E_VP_MDET_TX(i)); if (reg & I40E_VP_MDET_TX_VALID_MASK) { wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); vp_mdet_num = i; vf->num_mdd_events++; vf_mdd_detected = true; } } /* Print out an error message */ if (vf_mdd_detected && pf_mdd_detected) device_printf(dev, "Malicious Driver Detection event %d" " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n", event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num); else if (vf_mdd_detected && !pf_mdd_detected) device_printf(dev, "Malicious Driver Detection event %d" " on TX queue %d, pf number %d, vf number %d (VF-%d)\n", event, queue, pf_num, vf_num, vp_mdet_num); else if (!vf_mdd_detected && pf_mdd_detected) device_printf(dev, "Malicious Driver Detection event %d" " on TX queue %d, pf number %d (PF-%d)\n", event, queue, pf_num, pf_mdet_num); /* Theoretically shouldn't happen */ else device_printf(dev, "TX Malicious Driver Detection event (unknown)\n"); } static void ixl_handle_rx_mdd_event(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct ixl_vf *vf; bool mdd_detected = false; bool pf_mdd_detected = false; bool vf_mdd_detected = false; u16 queue; u8 pf_num, event; u8 pf_mdet_num, vp_mdet_num; u32 reg; /* * GL_MDET_RX doesn't contain VF number information, unlike * GL_MDET_TX. 
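 * The offending VF, if any, is instead identified by scanning the * per-VF I40E_VP_MDET_RX registers below.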
*/ reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } if (!mdd_detected) return; reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); pf_mdet_num = hw->pf_id; pf_mdd_detected = true; } /* Check if MDD was caused by a VF */ for (int i = 0; i < pf->num_vfs; i++) { vf = &(pf->vfs[i]); reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); vp_mdet_num = i; vf->num_mdd_events++; vf_mdd_detected = true; } } /* Print out an error message */ if (vf_mdd_detected && pf_mdd_detected) device_printf(dev, "Malicious Driver Detection event %d" " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n", event, queue, pf_num, pf_mdet_num, vp_mdet_num); else if (vf_mdd_detected && !pf_mdd_detected) device_printf(dev, "Malicious Driver Detection event %d" " on RX queue %d, pf number %d, (VF-%d)\n", event, queue, pf_num, vp_mdet_num); else if (!vf_mdd_detected && pf_mdd_detected) device_printf(dev, "Malicious Driver Detection event %d" " on RX queue %d, pf number %d (PF-%d)\n", event, queue, pf_num, pf_mdet_num); /* Theoretically shouldn't happen */ else device_printf(dev, "RX Malicious Driver Detection event (unknown)\n"); } /** * ixl_handle_mdd_event * * Called from the interrupt handler to identify possibly malicious VFs * (events caused by the PF itself are detected as well) **/ void ixl_handle_mdd_event(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg; /* * Handle both TX/RX because it's possible they could * both trigger in the same interrupt. */ ixl_handle_tx_mdd_event(pf); ixl_handle_rx_mdd_event(pf); atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING); /* re-enable mdd interrupt cause */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); ixl_flush(hw); } void ixl_enable_intr0(struct i40e_hw *hw) { u32 reg; /* Use IXL_ITR_NONE so ITR isn't updated here */ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, reg); } void ixl_disable_intr0(struct i40e_hw *hw) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTL0, reg); ixl_flush(hw); } void ixl_enable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } void ixl_disable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } void ixl_handle_empr_reset(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING); ixl_prepare_for_reset(pf, is_up); /* * i40e_pf_reset checks the type of reset and acts * accordingly. If an EMP or Core reset was performed, * doing a PF reset is not necessary and it sometimes * fails. */ ixl_pf_reset(pf); if (!IXL_PF_IN_RECOVERY_MODE(pf) && ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) { atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); device_printf(pf->dev, "Firmware recovery mode detected. 
Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); pf->link_up = FALSE; ixl_update_link_status(pf); } ixl_rebuild_hw_structs_after_reset(pf, is_up); atomic_clear_32(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING); } void ixl_update_stats_counters(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_vf *vf; u64 prev_link_xoff_rx = pf->stats.link_xoff_rx; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw_port_stats *osd = &pf->stats_offsets; /* Update hw stats */ ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_bytes, &nsd->eth.rx_bytes); ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, &nsd->tx_dropped_link_down); ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, &osd->mac_remote_faults, &nsd->mac_remote_faults); ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); /* Flow control (LFC) stats */ ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); /* * For watchdog management we need to know if we have been paused * during the last interval, so capture that here. 
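 * iflib consults isc_pause_frames in its timer so that a queue * stalled by flow control is not mistaken for a TX hang.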
*/ if (pf->stats.link_xoff_rx != prev_link_xoff_rx) vsi->shared->isc_pause_frames = 1; /* Packet size stats rx */ ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); /* Packet size stats tx */ ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); + /* EEE */ + i40e_get_phy_lpi_status(hw, nsd); + + i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, + &osd->tx_lpi_count, &nsd->tx_lpi_count, + &osd->rx_lpi_count, &nsd->rx_lpi_count); + pf->stat_offsets_loaded = true; /* End hw stats */ /* Update vsi stats */ ixl_update_vsi_stats(vsi); for (int i = 0; i < pf->num_vfs; i++) { vf = &pf->vfs[i]; if (vf->vf_flags & VF_FLAG_ENABLED) ixl_update_eth_stats(&pf->vfs[i].vsi); } } /** * Update VSI-specific ethernet statistics counters. 
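 * The same offset scheme as the port stats applies: the first values * read after a reset are saved and subtracted from later raw readings.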
**/ void ixl_update_eth_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *es; struct i40e_eth_stats *oes; u16 stat_idx = vsi->info.stat_counter_idx; es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; /* Gather up the stats that the hw collects */ ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; } void ixl_update_vsi_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf; struct ifnet *ifp; struct i40e_eth_stats *es; u64 tx_discards; struct i40e_hw_port_stats *nsd; pf = vsi->back; ifp = vsi->ifp; es = &vsi->eth_stats; nsd = &pf->stats; ixl_update_eth_stats(vsi); tx_discards = es->tx_discards + nsd->tx_dropped_link_down; /* Update ifnet stats */ IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast + es->rx_broadcast); IXL_SET_OPACKETS(vsi, es->tx_unicast + es->tx_multicast + es->tx_broadcast); IXL_SET_IBYTES(vsi, es->rx_bytes); IXL_SET_OBYTES(vsi, es->tx_bytes); IXL_SET_IMCASTS(vsi, es->rx_multicast); IXL_SET_OMCASTS(vsi, es->tx_multicast); IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + nsd->rx_jabber); IXL_SET_OERRORS(vsi, es->tx_errors); IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); IXL_SET_OQDROPS(vsi, tx_discards); IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); IXL_SET_COLLISIONS(vsi, 0); } /** * Reset all of the stats for the given pf **/ void ixl_pf_reset_stats(struct ixl_pf *pf) { bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); pf->stat_offsets_loaded = false; } /** * Resets all stats of the given vsi **/ void ixl_vsi_reset_stats(struct ixl_vsi *vsi) { bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); vsi->stat_offsets_loaded = false; } /** * Read and update a 48 bit stat from the hw * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. 
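 * Wrap-around example: with *offset == 0xFFFFFFFFFFF0 and a raw
 * reading of 0x10, the reported stat is
 * (0x10 + ((u64)1 << 48)) - 0xFFFFFFFFFFF0 = 0x20.
 **/

/*
 * A minimal sketch (with a hypothetical name, not part of the driver API)
 * of the same offset/wrap arithmetic in isolation:
 */
static inline u64
ixl_stat_delta48_sketch(u64 new_data, u64 offset)
{
	u64 stat;

	if (new_data >= offset)
		stat = new_data - offset;
	else
		/* The 48-bit counter wrapped since the offset was taken */
		stat = (new_data + ((u64)1 << 48)) - offset;
	return (stat & 0xFFFFFFFFFFFFULL);
}

/**
 * (ixl_stat_update48 below applies this arithmetic to the HW registers.)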
**/ void ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) new_data = rd64(hw, loreg); #else /* * Use two rd32's instead of one rd64; FreeBSD versions before * 10 don't support 64-bit bus reads/writes. */ new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; #endif if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = new_data - *offset; else *stat = (new_data + ((u64)1 << 48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } /** * Read and update a 32 bit stat from the hw **/ void ixl_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; new_data = rd32(hw, reg); if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = (u32)(new_data - *offset); else *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); } /** * Add subset of device sysctls safe to use in recovery mode */ void ixl_add_sysctls_recovery_mode(struct ixl_pf *pf) { device_t dev = pf->dev; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); struct sysctl_oid *debug_node; struct sysctl_oid_list *debug_list; SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); /* Add sysctls meant to print debug information, but don't list them * in "sysctl -a" output. */ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, "Debug Sysctls"); debug_list = SYSCTL_CHILDREN(debug_node); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "shared_debug_mask", CTLFLAG_RW, &pf->hw.debug_mask, 0, "Shared code debug message level"); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "core_debug_mask", CTLFLAG_RW, &pf->dbg_mask, 0, "Non-shared code debug message level"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); } void ixl_add_device_sysctls(struct ixl_pf *pf) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); struct sysctl_oid *debug_node; struct sysctl_oid_list *debug_list; struct sysctl_oid *fec_node; struct sysctl_oid_list *fec_list; + struct sysctl_oid *eee_node; + struct sysctl_oid_list *eee_list; /* Set up sysctls */ SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 
SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_unallocated_queues, "I", "Queues not allocated to a PF or VF"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_pf_tx_itr, "I", "Immediately set TX ITR value for all queues"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_pf_rx_itr, "I", "Immediately set RX ITR value for all queues"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); /* Add FEC sysctls for 25G adapters */ if (i40e_is_25G_device(hw->device_id)) { fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "FEC Sysctls"); fec_list = SYSCTL_CHILDREN(fec_node); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes"); } SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); + eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, + OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, + "Energy Efficient Ethernet (EEE) Sysctls"); + eee_list = SYSCTL_CHILDREN(eee_node); + + SYSCTL_ADD_PROC(ctx, eee_list, + OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, + pf, 0, ixl_sysctl_eee_enable, "I", + "Enable Energy Efficient Ethernet (EEE)"); + + SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", + CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, + "TX LPI status"); + + SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", + CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, + "RX LPI status"); + + SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", + CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, + "TX LPI count"); + + SYSCTL_ADD_UQUAD(ctx, 
eee_list, OID_AUTO, "rx_lpi_count", + CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, + "RX LPI count"); + /* Add sysctls meant to print debug information, but don't list them * in "sysctl -a" output. */ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, "Debug Sysctls"); debug_list = SYSCTL_CHILDREN(debug_node); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "shared_debug_mask", CTLFLAG_RW, &pf->hw.debug_mask, 0, "Shared code debug message level"); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "core_debug_mask", CTLFLAG_RW, &pf->dbg_mask, 0, "Non-shared code debug message level"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); if (pf->has_i2c) { SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD | 
CTLFLAG_NEEDGIANT, pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); } } /* * Primarily for finding out how many queues can be assigned to VFs, * at runtime. */ static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int queues; queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); return sysctl_handle_int(oidp, NULL, queues, req); } static const char * ixl_link_speed_string(enum i40e_aq_link_speed link_speed) { const char * link_speed_str[] = { "Unknown", "100 Mbps", "1 Gbps", "10 Gbps", "40 Gbps", "20 Gbps", "25 Gbps", + "2.5 Gbps", + "5 Gbps" }; int index; switch (link_speed) { case I40E_LINK_SPEED_100MB: index = 1; break; case I40E_LINK_SPEED_1GB: index = 2; break; case I40E_LINK_SPEED_10GB: index = 3; break; case I40E_LINK_SPEED_40GB: index = 4; break; case I40E_LINK_SPEED_20GB: index = 5; break; case I40E_LINK_SPEED_25GB: index = 6; break; + case I40E_LINK_SPEED_2_5GB: + index = 7; + break; + case I40E_LINK_SPEED_5GB: + index = 8; + break; case I40E_LINK_SPEED_UNKNOWN: default: index = 0; break; } return (link_speed_str[index]); } int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; int error = 0; ixl_update_link_status(pf); error = sysctl_handle_string(oidp, __DECONST(void *, ixl_link_speed_string(hw->phy.link_info.link_speed)), 8, req); return (error); } /* * Converts 8-bit speeds value to and from sysctl flags and * Admin Queue flags. */ static u8 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) { -#define SPEED_MAP_SIZE 6 +#define SPEED_MAP_SIZE 8 static u16 speedmap[SPEED_MAP_SIZE] = { (I40E_LINK_SPEED_100MB | (0x1 << 8)), (I40E_LINK_SPEED_1GB | (0x2 << 8)), (I40E_LINK_SPEED_10GB | (0x4 << 8)), (I40E_LINK_SPEED_20GB | (0x8 << 8)), (I40E_LINK_SPEED_25GB | (0x10 << 8)), - (I40E_LINK_SPEED_40GB | (0x20 << 8)) + (I40E_LINK_SPEED_40GB | (0x20 << 8)), + (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), + (I40E_LINK_SPEED_5GB | (0x80 << 8)), }; u8 retval = 0; for (int i = 0; i < SPEED_MAP_SIZE; i++) { if (to_aq) retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; else retval |= (speeds & speedmap[i]) ? 
(speedmap[i] >> 8) : 0; } return (retval); } int ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; enum i40e_status_code aq_error = 0; /* Get current capability information */ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (aq_error) { device_printf(dev, "%s: Error getting phy capabilities %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EIO); } /* Prepare new config */ bzero(&config, sizeof(config)); if (from_aq) config.link_speed = speeds; else config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_PHY_FEC_CONFIG_MASK; /* Do aq command & restart link */ aq_error = i40e_aq_set_phy_config(hw, &config, NULL); if (aq_error) { device_printf(dev, "%s: Error setting new phy config %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EIO); } return (0); } /* ** Supported link speeds ** Flags: ** 0x1 - 100 Mb ** 0x2 - 1G ** 0x4 - 10G ** 0x8 - 20G ** 0x10 - 25G ** 0x20 - 40G +** 0x40 - 2.5G +** 0x80 - 5G */ static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); return sysctl_handle_int(oidp, NULL, supported, req); } /* ** Control link advertise speed: ** Flags: ** 0x1 - advertise 100 Mb ** 0x2 - advertise 1G ** 0x4 - advertise 10G ** 0x8 - advertise 20G ** 0x10 - advertise 25G ** 0x20 - advertise 40G +** 0x40 - advertise 2.5G +** 0x80 - advertise 5G ** ** Set to 0 to disable link */ int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; u8 converted_speeds; int requested_ls = 0; int error = 0; /* Read in new mode */ requested_ls = pf->advertised_speed; error = sysctl_handle_int(oidp, &requested_ls, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (IXL_PF_IN_RECOVERY_MODE(pf)) { device_printf(dev, "Interface is currently in FW recovery mode. 
" "Setting advertise speed not supported\n"); return (EINVAL); } /* Error out if bits outside of possible flag range are set */ - if ((requested_ls & ~((u8)0x3F)) != 0) { + if ((requested_ls & ~((u8)0xFF)) != 0) { device_printf(dev, "Input advertised speed out of range; " "valid flags are: 0x%02x\n", ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); return (EINVAL); } /* Check if adapter supports input value */ converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { device_printf(dev, "Invalid advertised speed; " "valid flags are: 0x%02x\n", ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); return (EINVAL); } error = ixl_set_advertised_speeds(pf, requested_ls, false); if (error) return (error); pf->advertised_speed = requested_ls; ixl_update_link_status(pf); return (0); } /* * Input: bitmap of enum i40e_aq_link_speed */ u64 ixl_max_aq_speed_to_value(u8 link_speeds) { if (link_speeds & I40E_LINK_SPEED_40GB) return IF_Gbps(40); if (link_speeds & I40E_LINK_SPEED_25GB) return IF_Gbps(25); if (link_speeds & I40E_LINK_SPEED_20GB) return IF_Gbps(20); if (link_speeds & I40E_LINK_SPEED_10GB) return IF_Gbps(10); + if (link_speeds & I40E_LINK_SPEED_5GB) + return IF_Gbps(5); + if (link_speeds & I40E_LINK_SPEED_2_5GB) + return IF_Mbps(2500); if (link_speeds & I40E_LINK_SPEED_1GB) return IF_Gbps(1); if (link_speeds & I40E_LINK_SPEED_100MB) return IF_Mbps(100); else /* Minimum supported link speed */ return IF_Mbps(100); } /* ** Get the width and transaction speed of ** the bus this adapter is plugged into. */ void ixl_get_bus_info(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u16 link; u32 offset, num_ports; u64 max_speed; /* Some devices don't use PCIE */ if (hw->mac.type == I40E_MAC_X722) return; /* Read PCI Express Capabilities Link Status Register */ pci_find_cap(dev, PCIY_EXPRESS, &offset); link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); /* Fill out hw struct with PCIE info */ i40e_set_pci_config_data(hw, link); /* Use info to print out bandwidth messages */ device_printf(dev,"PCI Express Bus: Speed %s %s\n", ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : ("Unknown")); /* * If adapter is in slot with maximum supported speed, * no warning message needs to be printed out. 
*/ if (hw->bus.speed >= i40e_bus_speed_8000 && hw->bus.width >= i40e_bus_width_pcie_x8) return; num_ports = bitcount32(hw->func_caps.valid_functions); max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { device_printf(dev, "PCI-Express bandwidth available" " for this device may be insufficient for" " optimal performance.\n"); device_printf(dev, "Please move the device to a different" " PCI-e link with more lanes and/or higher" " transfer rate.\n"); } } static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; struct sbuf *sbuf; sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ixl_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } void ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) { u8 nvma_ptr = nvma->config & 0xFF; u8 nvma_flags = (nvma->config & 0xF00) >> 8; const char * cmd_str; switch (nvma->command) { case I40E_NVM_READ: if (nvma_ptr == 0xF && nvma_flags == 0xF && nvma->offset == 0 && nvma->data_size == 1) { device_printf(dev, "NVMUPD: Get Driver Status Command\n"); return; } cmd_str = "READ "; break; case I40E_NVM_WRITE: cmd_str = "WRITE"; break; default: device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); return; } device_printf(dev, "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); } int ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) { struct i40e_hw *hw = &pf->hw; struct i40e_nvm_access *nvma; device_t dev = pf->dev; enum i40e_status_code status = 0; size_t nvma_size, ifd_len, exp_len; int err, perrno; DEBUGFUNC("ixl_handle_nvmupd_cmd"); /* Sanity checks */ nvma_size = sizeof(struct i40e_nvm_access); ifd_len = ifd->ifd_len; if (ifd_len < nvma_size || ifd->ifd_data == NULL) { device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__); device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", __func__, ifd_len, nvma_size); device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data); return (EINVAL); } nvma = malloc(ifd_len, M_IXL, M_WAITOK); err = copyin(ifd->ifd_data, nvma, ifd_len); if (err) { device_printf(dev, "%s: Cannot get request from user space\n", __func__); free(nvma, M_IXL); return (err); } if (pf->dbg_mask & IXL_DBG_NVMUPD) ixl_print_nvm_cmd(dev, nvma); if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) { int count = 0; while (count++ < 100) { i40e_msec_delay(100); if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING)) break; } } if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) { device_printf(dev, "%s: timeout waiting for EMP reset to finish\n", __func__); free(nvma, M_IXL); return (-EBUSY); } if (nvma->data_size < 1 || nvma->data_size > 4096) { device_printf(dev, "%s: invalid request, data size not in supported range\n", __func__); free(nvma, M_IXL); return (EINVAL); } /* * Older versions of the NVM update tool don't set ifd_len to the size * of the entire buffer passed to the ioctl. Check the data_size field * in the contained i40e_nvm_access struct and ensure everything is * copied in from userspace. 
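 * For example, a 4096-byte read arrives as one struct i40e_nvm_access
 * header plus 4096 payload bytes; since the struct already reserves a
 * single data byte, the full buffer is
 * exp_len = sizeof(struct i40e_nvm_access) + 4096 - 1 bytes, which the
 * realloc/copyin below must cover whenever ifd_len was understated.
 */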
exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ if (ifd_len < exp_len) { ifd_len = exp_len; nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); err = copyin(ifd->ifd_data, nvma, ifd_len); if (err) { device_printf(dev, "%s: Cannot get request from user space\n", __func__); free(nvma, M_IXL); return (err); } } // TODO: Might need a different lock here // IXL_PF_LOCK(pf); status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); // IXL_PF_UNLOCK(pf); err = copyout(nvma, ifd->ifd_data, ifd_len); free(nvma, M_IXL); if (err) { device_printf(dev, "%s: Cannot return data to user space\n", __func__); return (err); } /* Let the nvmupdate report errors, show them only when debug is enabled */ if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", i40e_stat_str(hw, status), perrno); /* * -EPERM is actually ERESTART, which the kernel interprets as a request * to run this ioctl again. So use -EACCES for -EPERM instead. */ if (perrno == -EPERM) return (-EACCES); else return (perrno); } int ixl_find_i2c_interface(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; bool i2c_en, port_matched; u32 reg; for (int i = 0; i < 4; i++) { reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) & BIT(hw->port); if (i2c_en && port_matched) return (i); } return (-1); } static char * ixl_phy_type_string(u32 bit_pos, bool ext) { static char * phy_types_str[32] = { "SGMII", "1000BASE-KX", "10GBASE-KX4", "10GBASE-KR", "40GBASE-KR4", "XAUI", "XFI", "SFI", "XLAUI", "XLPPI", "40GBASE-CR4", "10GBASE-CR1", "SFP+ Active DA", "QSFP+ Active DA", "Reserved (14)", "Reserved (15)", "Reserved (16)", "100BASE-TX", "1000BASE-T", "10GBASE-T", "10GBASE-SR", "10GBASE-LR", "10GBASE-SFP+Cu", "10GBASE-CR1", "40GBASE-CR4", "40GBASE-SR4", "40GBASE-LR4", "1000BASE-SX", "1000BASE-LX", "1000BASE-T Optical", "20GBASE-KR2", "Reserved (31)" }; static char * ext_phy_types_str[8] = { "25GBASE-KR", "25GBASE-CR", "25GBASE-SR", "25GBASE-LR", "25GBASE-AOC", "25GBASE-ACC", - "Reserved (6)", - "Reserved (7)" + "2.5GBASE-T", + "5GBASE-T" }; if (ext && bit_pos > 7) return "Invalid_Ext"; if (bit_pos > 31) return "Invalid"; return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; } /* TODO: ERJ: I don't think this is necessary anymore.
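 * (The helper below fills a default direct command descriptor for
 * i40e_aqc_opc_get_link_status, sets I40E_AQ_LSE_ENABLE so Link Status
 * Event reporting stays armed, and copies the response straight out of
 * desc.params.raw into the caller's buffer.)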
*/ int ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct i40e_aq_desc desc; enum i40e_status_code status; struct i40e_aqc_get_link_status *aq_link_status = (struct i40e_aqc_get_link_status *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (status) { device_printf(dev, "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); return (0); } static char * ixl_phy_type_string_ls(u8 val) { if (val >= 0x1F) return ixl_phy_type_string(val - 0x1F, true); else return ixl_phy_type_string(val, false); } static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } struct i40e_aqc_get_link_status link_status; error = ixl_aq_get_link_status(pf, &link_status); if (error) { sbuf_delete(buf); return (error); } sbuf_printf(buf, "\n" "PHY Type : 0x%02x<%s>\n" "Speed : 0x%02x\n" "Link info: 0x%02x\n" "AN info : 0x%02x\n" "Ext info : 0x%02x\n" "Loopback : 0x%02x\n" "Max Frame: %d\n" "Config : 0x%02x\n" "Power : 0x%02x", link_status.phy_type, ixl_phy_type_string_ls(link_status.phy_type), link_status.link_speed, link_status.link_info, link_status.an_info, link_status.ext_info, link_status.loopback, link_status.max_frame_size, link_status.config, link_status.power_desc); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp abilities; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (status) { device_printf(dev, "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return (EIO); } sbuf_printf(buf, "\n" "PHY Type : %08x", abilities.phy_type); if (abilities.phy_type != 0) { sbuf_printf(buf, "<"); for (int i = 0; i < 32; i++) if ((1 << i) & abilities.phy_type) sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); sbuf_printf(buf, ">"); } sbuf_printf(buf, "\nPHY Ext : %02x", abilities.phy_type_ext); if (abilities.phy_type_ext != 0) { sbuf_printf(buf, "<"); for (int i = 0; i < 4; i++) if ((1 << i) & abilities.phy_type_ext) sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true)); sbuf_printf(buf, ">"); } sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); if (abilities.link_speed != 0) { u8 link_speed; sbuf_printf(buf, " <"); for (int i = 0; i < 8; i++) { link_speed = (1 << i) & abilities.link_speed; if (link_speed) sbuf_printf(buf, "%s, ", ixl_link_speed_string(link_speed)); } sbuf_printf(buf, ">"); } sbuf_printf(buf, "\n" "Abilities: 
%02x\n" "EEE cap : %04x\n" "EEER reg : %08x\n" "D3 Lpan : %02x\n" "ID : %02x %02x %02x %02x\n" "ModType : %02x %02x %02x\n" "ModType E: %01x\n" "FEC Cfg : %02x\n" "Ext CC : %02x", abilities.abilities, abilities.eee_capability, abilities.eeer_val, abilities.d3_lpan, abilities.phy_id[0], abilities.phy_id[1], abilities.phy_id[2], abilities.phy_id[3], abilities.module_type[0], abilities.module_type[1], abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, abilities.fec_cfg_curr_mod_ext_info & 0x1F, abilities.ext_comp_code); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct ixl_vsi *vsi = &pf->vsi; struct ixl_mac_filter *f; device_t dev = pf->dev; int error = 0, ftl_len = 0, ftl_counter = 0; struct sbuf *buf; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } sbuf_printf(buf, "\n"); /* Print MAC filters */ sbuf_printf(buf, "PF Filters:\n"); SLIST_FOREACH(f, &vsi->ftl, next) ftl_len++; if (ftl_len < 1) sbuf_printf(buf, "(none)\n"); else { SLIST_FOREACH(f, &vsi->ftl, next) { sbuf_printf(buf, MAC_FORMAT ", vlan %4d, flags %#06x", MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); /* don't print '\n' for last entry */ if (++ftl_counter != ftl_len) sbuf_printf(buf, "\n"); } } #ifdef PCI_IOV /* TODO: Give each VF its own filter list sysctl */ struct ixl_vf *vf; if (pf->num_vfs > 0) { sbuf_printf(buf, "\n\n"); for (int i = 0; i < pf->num_vfs; i++) { vf = &pf->vfs[i]; if (!(vf->vf_flags & VF_FLAG_ENABLED)) continue; vsi = &vf->vsi; ftl_len = 0, ftl_counter = 0; sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); SLIST_FOREACH(f, &vsi->ftl, next) ftl_len++; if (ftl_len < 1) sbuf_printf(buf, "(none)\n"); else { SLIST_FOREACH(f, &vsi->ftl, next) { sbuf_printf(buf, MAC_FORMAT ", vlan %4d, flags %#06x\n", MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); } } } } #endif error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } #define IXL_SW_RES_SIZE 0x14 int ixl_res_alloc_cmp(const void *a, const void *b) { const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; return ((int)one->resource_type - (int)two->resource_type); } /* * Longest string length: 25 */ const char * ixl_switch_res_type_string(u8 type) { static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { "VEB", "VSI", "Perfect Match MAC address", "S-tag", "(Reserved)", "Multicast hash entry", "Unicast hash entry", "VLAN", "VSI List entry", "(Reserved)", "VLAN Statistic Pool", "Mirror Rule", "Queue Set", "Inner VLAN Forward filter", "(Reserved)", "Inner MAC", "IP", "GRE/VN1 Key", "VN2 Key", "Tunneling Port" }; if (type < IXL_SW_RES_SIZE) return ixl_switch_res_type_strings[type]; else return "(Reserved)"; } static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; enum i40e_status_code status; int error = 0; u8 num_entries; struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for 
output.\n"); return (ENOMEM); } bzero(resp, sizeof(resp)); status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, resp, IXL_SW_RES_SIZE, NULL); if (status) { device_printf(dev, "%s: get_switch_resource_alloc() error %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return (error); } /* Sort entries by type for display */ qsort(resp, num_entries, sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), &ixl_res_alloc_cmp); sbuf_cat(buf, "\n"); sbuf_printf(buf, "# of entries: %d\n", num_entries); sbuf_printf(buf, " Type | Guaranteed | Total | Used | Un-allocated\n" " | (this) | (all) | (this) | (all) \n"); for (int i = 0; i < num_entries; i++) { sbuf_printf(buf, "%25s | %10d %5d %6d %12d", ixl_switch_res_type_string(resp[i].resource_type), resp[i].guaranteed, resp[i].total, resp[i].used, resp[i].total_unalloced); if (i < num_entries - 1) sbuf_cat(buf, "\n"); } error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } enum ixl_sw_seid_offset { IXL_SW_SEID_EMP = 1, IXL_SW_SEID_MAC_START = 2, IXL_SW_SEID_MAC_END = 5, IXL_SW_SEID_PF_START = 16, IXL_SW_SEID_PF_END = 31, IXL_SW_SEID_VF_START = 32, IXL_SW_SEID_VF_END = 159, }; /* * Caller must init and delete sbuf; this function will clear and * finish it for caller. * * Note: The SEID argument only applies for elements defined by FW at * power-on; these include the EMP, Ports, PFs and VFs. */ static char * ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid) { sbuf_clear(s); /* If SEID is in certain ranges, then we can infer the * mapping of SEID to switch element. */ if (seid == IXL_SW_SEID_EMP) { sbuf_cat(s, "EMP"); goto out; } else if (seid >= IXL_SW_SEID_MAC_START && seid <= IXL_SW_SEID_MAC_END) { sbuf_printf(s, "MAC %2d", seid - IXL_SW_SEID_MAC_START); goto out; } else if (seid >= IXL_SW_SEID_PF_START && seid <= IXL_SW_SEID_PF_END) { sbuf_printf(s, "PF %3d", seid - IXL_SW_SEID_PF_START); goto out; } else if (seid >= IXL_SW_SEID_VF_START && seid <= IXL_SW_SEID_VF_END) { sbuf_printf(s, "VF %3d", seid - IXL_SW_SEID_VF_START); goto out; } switch (element_type) { case I40E_AQ_SW_ELEM_TYPE_BMC: sbuf_cat(s, "BMC"); break; case I40E_AQ_SW_ELEM_TYPE_PV: sbuf_cat(s, "PV"); break; case I40E_AQ_SW_ELEM_TYPE_VEB: sbuf_cat(s, "VEB"); break; case I40E_AQ_SW_ELEM_TYPE_PA: sbuf_cat(s, "PA"); break; case I40E_AQ_SW_ELEM_TYPE_VSI: sbuf_printf(s, "VSI"); break; default: sbuf_cat(s, "?"); break; } out: sbuf_finish(s); return sbuf_data(s); } static int ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b) { const struct i40e_aqc_switch_config_element_resp *one, *two; one = (const struct i40e_aqc_switch_config_element_resp *)a; two = (const struct i40e_aqc_switch_config_element_resp *)b; return ((int)one->seid - (int)two->seid); } static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; struct sbuf *nmbuf; enum i40e_status_code status; int error = 0; u16 next = 0; u8 aq_buf[I40E_AQ_LARGE_BUF]; struct i40e_aqc_switch_config_element_resp *elem; struct i40e_aqc_get_switch_config_resp *sw_config; sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } status = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (status) 
{ device_printf(dev, "%s: aq_get_switch_config() error %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return error; } if (next) device_printf(dev, "%s: TODO: get more config with SEID %d\n", __func__, next); nmbuf = sbuf_new_auto(); if (!nmbuf) { device_printf(dev, "Could not allocate sbuf for name output.\n"); sbuf_delete(buf); return (ENOMEM); } /* Sort entries by SEID for display */ qsort(sw_config->element, sw_config->header.num_reported, sizeof(struct i40e_aqc_switch_config_element_resp), &ixl_sw_cfg_elem_seid_cmp); sbuf_cat(buf, "\n"); /* Assuming <= 255 elements in switch */ sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); /* Exclude: * Revision -- all elements are revision 1 for now */ sbuf_printf(buf, "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n" " | | | (uplink)\n"); for (int i = 0; i < sw_config->header.num_reported; i++) { elem = &sw_config->element[i]; // "%4d (%8s) | %8s %8s %#8x", sbuf_printf(buf, "%4d", elem->seid); sbuf_cat(buf, " "); sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, elem->element_type, elem->seid)); sbuf_cat(buf, " | "); sbuf_printf(buf, "%4d", elem->uplink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 0, elem->uplink_seid)); sbuf_cat(buf, " | "); sbuf_printf(buf, "%4d", elem->downlink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 0, elem->downlink_seid)); sbuf_cat(buf, " | "); sbuf_printf(buf, "%8d", elem->connection_type); if (i < sw_config->header.num_reported - 1) sbuf_cat(buf, "\n"); } sbuf_delete(nmbuf); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; u32 reg; struct i40e_aqc_get_set_rss_key_data key_data; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(&key_data, sizeof(key_data)); sbuf_cat(buf, "\n"); if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); if (status) device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); } } ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static void ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) { int i, j, k, width; char c; if (length < 1 || buf == NULL) return; int byte_stride = 16; int lines = length / byte_stride; int rem = length % byte_stride; if (rem > 0) lines++; for (i = 0; i < lines; i++) { width = (rem > 0 && i == lines - 1) ? 
rem : byte_stride; sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); for (j = 0; j < width; j++) sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); if (width < byte_stride) { for (k = 0; k < (byte_stride - width); k++) sbuf_printf(sb, " "); } if (!text) { sbuf_printf(sb, "\n"); continue; } for (j = 0; j < width; j++) { c = (char)buf[i * byte_stride + j]; if (c < 32 || c > 126) sbuf_printf(sb, "."); else sbuf_printf(sb, "%c", c); if (j == width - 1) sbuf_printf(sb, "\n"); } } } static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; u8 hlut[512]; u32 reg; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(hlut, sizeof(hlut)); sbuf_cat(buf, "\n"); if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); if (status) device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { reg = rd32(hw, I40E_PFQF_HLUT(i)); bcopy(®, &hlut[i << 2], 4); } } ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; u64 hena; hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); return sysctl_handle_long(oidp, NULL, hena, req); } /* * Sysctl to disable firmware's link management * * 1 - Disable link management on this port * 0 - Re-enable link management * * On normal NVMs, firmware manages link by default. 
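 * Illustrative usage (device unit 0 assumed):
 *   sysctl dev.ixl.0.debug.disable_fw_link_management=1
 * The handler below maps the 0/1 input onto the PHY debug flags via
 * i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL).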
*/ static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_mode = -1; enum i40e_status_code status = 0; int error = 0; /* Read in new mode */ error = sysctl_handle_int(oidp, &requested_mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Check for sane value */ if (requested_mode < 0 || requested_mode > 1) { device_printf(dev, "Valid modes are 0 or 1\n"); return (EINVAL); } /* Set new mode */ status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); if (status) { device_printf(dev, "%s: Error setting new phy debug mode %s," " aq error: %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } return (0); } /* * Read some diagnostic data from a (Q)SFP+ module * * SFP A2 QSFP Lower Page * Temperature 96-97 22-23 * Vcc 98-99 26-27 * TX power 102-103 34-35..40-41 * RX power 104-105 50-51..56-57 */ static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; struct sbuf *sbuf; int error = 0; u8 output; if (req->oldptr == NULL) { error = SYSCTL_OUT(req, 0, 128); return (0); } error = pf->read_i2c_byte(pf, 0, 0xA0, &output); if (error) { device_printf(dev, "Error reading from i2c\n"); return (error); } /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ if (output == 0x3) { /* * Check for: * - Internally calibrated data * - Diagnostic monitoring is implemented */ pf->read_i2c_byte(pf, 92, 0xA0, &output); if (!(output & 0x60)) { device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); return (0); } sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); for (u8 offset = 96; offset < 100; offset++) { pf->read_i2c_byte(pf, offset, 0xA2, &output); sbuf_printf(sbuf, "%02X ", output); } for (u8 offset = 102; offset < 106; offset++) { pf->read_i2c_byte(pf, offset, 0xA2, &output); sbuf_printf(sbuf, "%02X ", output); } } else if (output == 0xD || output == 0x11) { /* * QSFP+ modules are always internally calibrated, and must indicate * what types of diagnostic monitoring are implemented */ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); for (u8 offset = 22; offset < 24; offset++) { pf->read_i2c_byte(pf, offset, 0xA0, &output); sbuf_printf(sbuf, "%02X ", output); } for (u8 offset = 26; offset < 28; offset++) { pf->read_i2c_byte(pf, offset, 0xA0, &output); sbuf_printf(sbuf, "%02X ", output); } /* Read the data from the first lane */ for (u8 offset = 34; offset < 36; offset++) { pf->read_i2c_byte(pf, offset, 0xA0, &output); sbuf_printf(sbuf, "%02X ", output); } for (u8 offset = 50; offset < 52; offset++) { pf->read_i2c_byte(pf, offset, 0xA0, &output); sbuf_printf(sbuf, "%02X ", output); } } else { device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); return (0); } sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /* * Sysctl to read a byte from I2C bus. 
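 * (Worked example of the layout below: to read offset 96 from device
 * address 0xA2, write ((96 << 8) | 0xA2) == 0x60A2 to this sysctl; the
 * byte read back is reported through device_printf(), i.e. in dmesg.)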
* * Input: 32-bit value: * bits 0-7: device address (0xA0 or 0xA2) * bits 8-15: offset (0-255) * bits 16-31: unused * Output: 8-bit value read */ static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int input = -1, error = 0; u8 dev_addr, offset, output; /* Read in I2C read parameters */ error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Validate device address */ dev_addr = input & 0xFF; if (dev_addr != 0xA0 && dev_addr != 0xA2) { return (EINVAL); } offset = (input >> 8) & 0xFF; error = pf->read_i2c_byte(pf, offset, dev_addr, &output); if (error) return (error); device_printf(dev, "%02X\n", output); return (0); } /* * Sysctl to write a byte to the I2C bus. * * Input: 32-bit value: * bits 0-7: device address (0xA0 or 0xA2) * bits 8-15: offset (0-255) * bits 16-23: value to write * bits 24-31: unused * Output: 8-bit value written */ static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int input = -1, error = 0; u8 dev_addr, offset, value; /* Read in I2C write parameters */ error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Validate device address */ dev_addr = input & 0xFF; if (dev_addr != 0xA0 && dev_addr != 0xA2) { return (EINVAL); } offset = (input >> 8) & 0xFF; value = (input >> 16) & 0xFF; error = pf->write_i2c_byte(pf, offset, dev_addr, value); if (error) return (error); device_printf(dev, "%02X written\n", value); return (0); } static int ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, u8 bit_pos, int *is_set) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; enum i40e_status_code status; if (IXL_PF_IN_RECOVERY_MODE(pf)) return (EIO); status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, abilities, NULL); if (status) { device_printf(dev, "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); return (0); } static int ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, u8 bit_pos, int set) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct i40e_aq_set_phy_config config; enum i40e_status_code status; /* Set new PHY config */ memset(&config, 0, sizeof(config)); config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); if (set) config.fec_config |= bit_pos; if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.phy_type = abilities->phy_type; config.phy_type_ext = abilities->phy_type_ext; config.link_speed = abilities->link_speed; config.eee_capability = abilities->eee_capability; config.eeer = abilities->eeer_val; config.low_power_ctrl = abilities->d3_lpan; status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { device_printf(dev, "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } } return (0); } static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); if (error) return (error); /* Read in new mode */ 
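	/*
	 * Illustrative usage (hypothetical 25G port, device unit 0):
	 *   sysctl dev.ixl.0.fec.fc_ability=1
	 * advertises BASE-R (Fire Code / KR) FEC ability; the matching
	 * *_request sysctls below control what is requested at link time.
	 */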
error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); } static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); } static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); } static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); } static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); } static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } u8 *final_buff; /* This amount is only necessary if reading the entire cluster into memory */ #define IXL_FINAL_BUFF_SIZE (1280 * 1024) final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT); if (final_buff == NULL) { device_printf(dev, "Could not allocate memory for output.\n"); goto out; } int final_buff_len = 0; u8 cluster_id = 1; bool more = true; u8 dump_buf[4096]; u16 curr_buff_size = 4096; u8 curr_next_table = 0; u32 curr_next_index = 0; u16 ret_buff_size; u8 ret_next_table; u32 ret_next_index; sbuf_cat(buf, "\n"); while (more) { status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size, dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL); if (status) { device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); goto free_out; } /* copy info out of temp buffer */ bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size); final_buff_len += ret_buff_size; if (ret_next_table != curr_next_table) { /* We're done with the 
current table; we can dump out read data. */ sbuf_printf(buf, "%d:", curr_next_table); int bytes_printed = 0; while (bytes_printed <= final_buff_len) { sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), ""); bytes_printed += 16; } sbuf_cat(buf, "\n"); /* The entire cluster has been read; we're finished */ if (ret_next_table == 0xFF) break; /* Otherwise clear the output buffer and continue reading */ bzero(final_buff, IXL_FINAL_BUFF_SIZE); final_buff_len = 0; } if (ret_next_index == 0xFFFFFFFF) ret_next_index = 0; bzero(dump_buf, sizeof(dump_buf)); curr_next_table = ret_next_table; curr_next_index = ret_next_index; } free_out: free(final_buff, M_DEVBUF); out: error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_start_fw_lldp(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; enum i40e_status_code status; status = i40e_aq_start_lldp(hw, false, NULL); if (status != I40E_SUCCESS) { switch (hw->aq.asq_last_status) { case I40E_AQ_RC_EEXIST: device_printf(pf->dev, "FW LLDP agent is already running\n"); break; case I40E_AQ_RC_EPERM: device_printf(pf->dev, "Device configuration forbids SW from starting " "the LLDP agent. Set the \"LLDP Agent\" UEFI HII " "attribute to \"Enabled\" to use this sysctl\n"); return (EINVAL); default: device_printf(pf->dev, "Starting FW LLDP agent failed: error: %s, %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EINVAL); } } atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); return (0); } static int ixl_stop_fw_lldp(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; if (hw->func_caps.npar_enable != 0) { device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n"); return (EINVAL); } if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. 
Please update FW to enable this feature.\n"); return (EINVAL); } status = i40e_aq_stop_lldp(hw, true, false, NULL); if (status != I40E_SUCCESS) { if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { device_printf(dev, "Disabling FW LLDP agent failed: error: %s, %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EINVAL); } device_printf(dev, "FW LLDP agent is already stopped\n"); } #ifndef EXTERNAL_RELEASE /* Let the FW set default DCB configuration on link UP as described in DCR 307.1 */ #endif i40e_aq_set_dcb_parameters(hw, true, NULL); atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); return (0); } static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int state, new_state, error = 0; state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); /* Read in new mode */ error = sysctl_handle_int(oidp, &new_state, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Already in requested state */ if (new_state == state) return (error); if (new_state == 0) return ixl_stop_fw_lldp(pf); return ixl_start_fw_lldp(pf); +} + +static int +ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + int state, new_state; + int sysctl_handle_status = 0; + enum i40e_status_code cmd_status; + + /* Init states' values */ + state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED)); + + /* Get requested mode */ + sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); + if ((sysctl_handle_status) || (req->newptr == NULL)) + return (sysctl_handle_status); + + /* Check if state has changed */ + if (new_state == state) + return (0); + + /* Set new state */ + cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); + + /* Save new state or report error */ + if (!cmd_status) { + if (new_state == 0) + atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); + else + atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); + } else if (cmd_status == I40E_ERR_CONFIG) + return (EPERM); + else + return (EIO); + + return (0); } int ixl_attach_get_link_status(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int error = 0; if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) { i40e_msec_delay(75); error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); if (error) { device_printf(dev, "link restart failed, aq_err=%d\n", pf->hw.aq.asq_last_status); return error; } } /* Determine link state */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); return (0); } static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int requested = 0, error = 0; /* Read in new mode */ error = sysctl_handle_int(oidp, &requested, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Initiate the PF reset later in the admin task */ atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ); return (error); } static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; int requested = 0, error = 0; /* Read in new mode */ error = sysctl_handle_int(oidp, &requested, 0, req); if ((error) || (req->newptr == NULL)) return (error); wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); return (error); } static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; int requested = 0, error = 0; /* Read in new mode */ error = 
sysctl_handle_int(oidp, &requested, 0, req); if ((error) || (req->newptr == NULL)) return (error); wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK); return (error); } /* * Print out mapping of TX queue indexes and Rx queue indexes * to MSI-X vectors. */ static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; struct sbuf *buf; int error = 0; struct ixl_rx_queue *rx_que = vsi->rx_queues; struct ixl_tx_queue *tx_que = vsi->tx_queues; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } sbuf_cat(buf, "\n"); for (int i = 0; i < vsi->num_rx_queues; i++) { rx_que = &vsi->rx_queues[i]; sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix); } for (int i = 0; i < vsi->num_tx_queues; i++) { tx_que = &vsi->tx_queues[i]; sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix); } error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } Index: head/sys/dev/ixl/virtchnl.h =================================================================== --- head/sys/dev/ixl/virtchnl.h (revision 365230) +++ head/sys/dev/ixl/virtchnl.h (revision 365231) @@ -1,747 +1,751 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _VIRTCHNL_H_ #define _VIRTCHNL_H_ /* Description: * This header file describes the VF-PF communication protocol used * by the drivers for all devices starting from our 40G product line * * Admin queue buffer usage: * desc->opcode is always aqc_opc_send_msg_to_pf * flags, retval, datalen, and data addr are all used normally. * The Firmware copies the cookie fields when sending messages between the * PF and VF, but uses all other fields internally. Due to this limitation, * we must send all messages as "indirect", i.e. using an external buffer. 
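 * (Concretely: even a small request places its payload in a separate
 * DMA-able buffer whose address and length go into the descriptor,
 * rather than packing the payload into the descriptor itself.)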
* * All the VSI indexes are relative to the VF. Each VF can have maximum of * three VSIs. All the queue indexes are relative to the VSI. Each VF can * have a maximum of sixteen queues for all of its VSIs. * * The PF is required to return a status code in v_retval for all messages * except RESET_VF, which does not require any response. The return value * is of status_code type, defined in the shared type.h. * * In general, VF driver initialization should roughly follow the order of * these opcodes. The VF driver must first validate the API version of the * PF driver, then request a reset, then get resources, then configure * queues and interrupts. After these operations are complete, the VF * driver may start its queues, optionally add MAC and VLAN filters, and * process traffic. */ /* START GENERIC DEFINES * Need to ensure the following enums and defines hold the same meaning and * value in current and future projects */ /* Error Codes */ enum virtchnl_status_code { VIRTCHNL_STATUS_SUCCESS = 0, VIRTCHNL_ERR_PARAM = -5, VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, VIRTCHNL_STATUS_NOT_SUPPORTED = -64, }; +#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 +#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 enum virtchnl_link_speed { VIRTCHNL_LINK_SPEED_UNKNOWN = 0, VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), + VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), + VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), }; /* for hsplit_0 field of Rx HMC context */ /* deprecated with AVF 1.0 */ enum virtchnl_rx_hsplit { VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, }; #define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6 /* END GENERIC DEFINES */ /* Opcodes for VF-PF communication. These are placed in the v_opcode field * of the virtchnl_msg structure. */ enum virtchnl_ops { /* The PF sends status change events to VFs using * the VIRTCHNL_OP_EVENT opcode. * VFs send requests to the PF using the other ops. * Use of "advanced opcode" features must be negotiated as part of capabilities * exchange and are not considered part of base mode feature set. 
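 * For example, VIRTCHNL_OP_IWARP (20) and the IWARP IRQ map opcodes are
 * advanced opcodes: a VF should issue them only after the PF has granted
 * the matching VIRTCHNL_VF_OFFLOAD_IWARP capability bit during
 * VIRTCHNL_OP_GET_VF_RESOURCES negotiation.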
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
 * of the virtchnl_msg structure.
 */
enum virtchnl_ops {
/* The PF sends status change events to VFs using
 * the VIRTCHNL_OP_EVENT opcode.
 * VFs send requests to the PF using the other ops.
 * Use of "advanced opcode" features must be negotiated as part of the
 * capabilities exchange and is not considered part of the base mode
 * feature set.
 */
	VIRTCHNL_OP_UNKNOWN = 0,
	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	VIRTCHNL_OP_RESET_VF = 2,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
	VIRTCHNL_OP_ENABLE_QUEUES = 8,
	VIRTCHNL_OP_DISABLE_QUEUES = 9,
	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
	VIRTCHNL_OP_ADD_VLAN = 12,
	VIRTCHNL_OP_DEL_VLAN = 13,
	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
	VIRTCHNL_OP_GET_STATS = 15,
	VIRTCHNL_OP_RSVD = 16,
	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
	VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
	VIRTCHNL_OP_SET_RSS_HENA = 26,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
	VIRTCHNL_OP_REQUEST_QUEUES = 29,
};

/* This macro is used to generate a compilation error if a structure
 * is not exactly the correct length. It gives a divide by zero error if the
 * structure is not of the correct size, otherwise it creates an enum that is
 * never used.
 */
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}

/* Virtual channel message descriptor. This overlays the admin queue
 * descriptor. All other data is passed in external buffers.
 */

struct virtchnl_msg {
	u8 pad[8];			/* AQ flags/opcode/len/retval fields */
	enum virtchnl_ops v_opcode;	/* avoid confusion with desc->opcode */
	enum virtchnl_status_code v_retval; /* ditto for desc->retval */
	u32 vfid;			/* used by PF when sending to VF */
};

VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);

/* Message descriptions and data structures. */

/* VIRTCHNL_OP_VERSION
 * VF posts its version number to the PF. PF responds with its version number
 * in the same format, along with a return code.
 * Reply from PF has its major/minor versions also in param0 and param1.
 * If there is a major version mismatch, then the VF cannot operate.
 * If there is a minor version mismatch, then the VF can operate but should
 * add a warning to the system log.
 *
 * This enum element MUST always be specified as == 1, regardless of other
 * changes in the API. The PF must always respond to this message without
 * error regardless of version mismatch.
 */
#define VIRTCHNL_VERSION_MAJOR			1
#define VIRTCHNL_VERSION_MINOR			1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0

struct virtchnl_version_info {
	u32 major;
	u32 minor;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);

#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))

/* VIRTCHNL_OP_RESET_VF
 * VF sends this request to PF with no parameters
 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
 * until reset completion is indicated. The admin queue must be reinitialized
 * after this operation.
 *
 * When reset is complete, PF must ensure that all queues in all VSIs associated
 * with the VF are stopped, all queue configurations in the HMC are set to 0,
 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
 * are cleared.
 */
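/*
 * [Editor's illustrative sketch, not part of this commit] How the size
 * check macro above catches ABI drift: the hypothetical struct below is
 * 8 bytes, so the first check compiles to an unused enum constant, while
 * the commented-out second check would fail with a division by zero.
 */
struct virtchnl_example_pair {
	u32 first;
	u32 second;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_example_pair);
/* VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_example_pair); -- divide by zero */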
/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
 * vsi_type should always be 6 for backward compatibility. Add other fields
 * as needed.
 */
enum virtchnl_vsi_type {
	VIRTCHNL_VSI_TYPE_INVALID = 0,
	VIRTCHNL_VSI_SRIOV = 6,
};

/* VIRTCHNL_OP_GET_VF_RESOURCES
 * Version 1.0 VF sends this request to PF with no parameters
 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
 * PF responds with an indirect message containing
 * virtchnl_vf_resource and one or more
 * virtchnl_vsi_resource structures.
 */

struct virtchnl_vsi_resource {
	u16 vsi_id;
	u16 num_queue_pairs;
	enum virtchnl_vsi_type vsi_type;
	u16 qset_handle;
	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);

/* VF capability flags
 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
 */
#define VIRTCHNL_VF_OFFLOAD_L2			0x00000001
#define VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
#define VIRTCHNL_VF_OFFLOAD_RSVD		0x00000004
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG		0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		0x00000040
#define VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
#define VIRTCHNL_VF_OFFLOAD_ENCAP		0x00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		0x00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	0x00400000

#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
			       VIRTCHNL_VF_OFFLOAD_VLAN | \
			       VIRTCHNL_VF_OFFLOAD_RSS_PF)

struct virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_cap_flags;
	u32 rss_key_size;
	u32 rss_lut_size;

	struct virtchnl_vsi_resource vsi_res[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);

/* VIRTCHNL_OP_CONFIG_TX_QUEUE
 * VF sends this message to set up parameters for one TX queue.
 * External data buffer contains one instance of virtchnl_txq_info.
 * PF configures requested queue and returns a status code.
 */

/* Tx queue config info */
struct virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;
	u16 ring_len;		/* number of descriptors, multiple of 8 */
	u16 headwb_enabled;	/* deprecated with AVF 1.0 */
	u64 dma_ring_addr;
	u64 dma_headwb_addr;	/* deprecated with AVF 1.0 */
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);

/* VIRTCHNL_OP_CONFIG_RX_QUEUE
 * VF sends this message to set up parameters for one RX queue.
 * External data buffer contains one instance of virtchnl_rxq_info.
 * PF configures requested queue and returns a status code.
 */

/* Rx queue config info */
struct virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;
	u32 ring_len;		/* number of descriptors, multiple of 32 */
	u16 hdr_size;
	u16 splithdr_enabled;	/* deprecated with AVF 1.0 */
	u32 databuffer_size;
	u32 max_pkt_size;
	u32 pad1;
	u64 dma_ring_addr;
	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
	u32 pad2;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
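/*
 * [Editor's illustrative sketch, not part of this commit] The trailing
 * vsi_res[1] array makes the GET_VF_RESOURCES reply variable-length; a
 * hypothetical helper sizing a reply buffer for num_vsis VSIs:
 */
static inline u32
virtchnl_vf_resource_len(u16 num_vsis)
{
	u32 len = sizeof(struct virtchnl_vf_resource);

	/* One vsi_res[] element is already counted in the base struct. */
	if (num_vsis > 1)
		len += (num_vsis - 1) *
		    sizeof(struct virtchnl_vsi_resource);
	return (len);
}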
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * VF sends this message to set parameters for all active TX and RX queues
 * associated with the specified VSI.
 * PF configures queues and returns status.
 * If the number of queues specified is greater than the number of queues
 * associated with the VSI, an error is returned and no queues are configured.
 */
struct virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct virtchnl_txq_info txq;
	struct virtchnl_rxq_info rxq;
};

VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);

struct virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;
	u32 pad;
	struct virtchnl_queue_pair_info qpair[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);

/* VIRTCHNL_OP_REQUEST_QUEUES
 * VF sends this message to request the PF to allocate additional queues to
 * this VF. Each VF gets a guaranteed number of queues on init but asking for
 * additional queues must be negotiated. This is a best effort request as it
 * is possible the PF does not have enough queues left to support the request.
 * If the PF cannot support the number requested it will respond with the
 * maximum number it is able to support; otherwise it will respond with the
 * number requested.
 */

/* VF resource request */
struct virtchnl_vf_res_request {
	u16 num_queue_pairs;
};

/* VIRTCHNL_OP_CONFIG_IRQ_MAP
 * VF uses this message to map vectors to queues.
 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
 * are to be associated with the specified vector.
 * The "other" causes are always mapped to vector 0.
 * PF configures interrupt mapping and returns status.
 */
struct virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;
	u16 rxq_map;
	u16 txq_map;
	u16 rxitr_idx;
	u16 txitr_idx;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);

struct virtchnl_irq_map_info {
	u16 num_vectors;
	struct virtchnl_vector_map vecmap[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);

/* VIRTCHNL_OP_ENABLE_QUEUES
 * VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
 * The queues fields are bitmaps indicating which queues to act upon.
 * (Currently, we only support 16 queues per VF, but we make the field
 * u32 to allow for expansion.)
 * PF performs requested action and returns status.
 */
struct virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;
	u32 rx_queues;
	u32 tx_queues;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);

/* VIRTCHNL_OP_ADD_ETH_ADDR
 * VF sends this message in order to add one or more unicast or multicast
 * address filters for the specified VSI.
 * PF adds the filters and returns status.
 */

/* VIRTCHNL_OP_DEL_ETH_ADDR
 * VF sends this message in order to remove one or more unicast or multicast
 * filters for the specified VSI.
 * PF removes the filters and returns status.
 */

struct virtchnl_ether_addr {
	u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);

struct virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;
	struct virtchnl_ether_addr list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);

/* VIRTCHNL_OP_ADD_VLAN
 * VF sends this message to add one or more VLAN tag filters for receives.
 * PF adds the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

/* VIRTCHNL_OP_DEL_VLAN
 * VF sends this message to remove one or more VLAN tag filters for receives.
 * PF removes the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

struct virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;
	u16 vlan_id[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
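/*
 * [Editor's illustrative sketch, not part of this commit] A hypothetical
 * helper computing the ADD/DEL_ETH_ADDR message length the way
 * virtchnl_vc_validate_vf_msg() below checks it: the base structure plus
 * one 8-byte virtchnl_ether_addr per element.
 */
static inline u16
virtchnl_ether_addr_list_msglen(u16 num_elements)
{
	return (sizeof(struct virtchnl_ether_addr_list) +
	    num_elements * sizeof(struct virtchnl_ether_addr));
}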
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);

#define FLAG_VF_UNICAST_PROMISC	0x00000001
#define FLAG_VF_MULTICAST_PROMISC	0x00000002

/* VIRTCHNL_OP_GET_STATS
 * VF sends this message to request stats for the selected VSI. VF uses
 * the virtchnl_queue_select struct to specify the VSI. The queue_id
 * field is ignored by the PF.
 *
 * PF replies with struct eth_stats in an external buffer.
 */

/* VIRTCHNL_OP_CONFIG_RSS_KEY
 * VIRTCHNL_OP_CONFIG_RSS_LUT
 * VF sends these messages to configure RSS. Only supported if both PF
 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
 * configuration negotiation. If this is the case, then the RSS fields in
 * the VF resource struct are valid.
 * Both the key and LUT are initialized to 0 by the PF, meaning that
 * RSS is effectively disabled until set up by the VF.
 */
struct virtchnl_rss_key {
	u16 vsi_id;
	u16 key_len;
	u8 key[1];	/* RSS hash key, packed bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);

struct virtchnl_rss_lut {
	u16 vsi_id;
	u16 lut_entries;
	u8 lut[1];	/* RSS lookup table */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);

/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 */
struct virtchnl_rss_hena {
	u64 hena;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);

/* VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,
	VIRTCHNL_EVENT_RESET_IMPENDING,
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};

#define PF_EVENT_SEVERITY_INFO			0
#define PF_EVENT_SEVERITY_ATTENTION		1
#define PF_EVENT_SEVERITY_ACTION_REQUIRED	2
#define PF_EVENT_SEVERITY_CERTAIN_DOOM		255

struct virtchnl_pf_event {
	enum virtchnl_event_codes event;
	union {
		struct {
			enum virtchnl_link_speed link_speed;
			bool link_status;
		} link_event;
	} event_data;

	int severity;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
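/*
 * [Editor's illustrative sketch, not part of this commit] Note the sizing
 * convention differs from the address/VLAN lists: for the RSS key/LUT
 * messages the validator below counts the embedded one-byte key[1]/lut[1]
 * array toward the total, so the expected length is sizeof() plus
 * (entries - 1).  Hypothetical helper mirroring that rule:
 */
static inline u16
virtchnl_rss_lut_msglen(u16 lut_entries)
{
	return (sizeof(struct virtchnl_rss_lut) + lut_entries - 1);
}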
/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
 * VF uses this message to request PF to map IWARP vectors to IWARP queues.
 * The request for this originates from the VF IWARP driver through
 * a client interface between VF LAN and VF IWARP driver.
 * A vector could have an AEQ and CEQ attached to it although
 * there is a single AEQ per VF IWARP instance in which case
 * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
 * There will never be a case where there will be multiple CEQs attached
 * to a single vector.
 * PF configures interrupt mapping and returns status.
 */

/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
 * In order for us to keep the interface simple, SW will define a
 * unique type value for AEQ.
 */
#define QUEUE_TYPE_PE_AEQ	0x80
#define QUEUE_INVALID_IDX	0xFFFF

struct virtchnl_iwarp_qv_info {
	u32 v_idx; /* msix_vector */
	u16 ceq_idx;
	u16 aeq_idx;
	u8 itr_idx;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);

struct virtchnl_iwarp_qvlist_info {
	u32 num_vectors;
	struct virtchnl_iwarp_qv_info qv_info[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);

/* VF reset states - these are written into the RSTAT register:
 * VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
 * When the reset is complete, it writes 1
 * When the PF detects that the VF has recovered, it writes 2
 * VF checks this register periodically to determine if a reset has occurred,
 * then polls it to know when the reset is complete.
 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked,
 * will result in 3.
 */
enum virtchnl_vfr_states {
	VIRTCHNL_VFR_INPROGRESS = 0,
	VIRTCHNL_VFR_COMPLETED,
	VIRTCHNL_VFR_VFACTIVE,
};
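/*
 * [Editor's illustrative sketch, not part of this commit] A VF polling for
 * reset completion after VIRTCHNL_OP_RESET_VF.  Assumes the rd32() register
 * accessor and the I40E_VFGEN_RSTAT register/field macros from the shared
 * i40e register definitions; retry delays and timeouts are omitted.
 */
static inline bool
vf_reset_done(struct i40e_hw *hw)
{
	u32 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	/* While the HW is actually in reset, the masked DEADBEEF pattern
	 * reads back as 3, which matches neither state below. */
	return (rstat == VIRTCHNL_VFR_COMPLETED ||
	    rstat == VIRTCHNL_VFR_VFACTIVE);
}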
/**
 * virtchnl_vc_validate_vf_msg
 * @ver: Virtchnl version info
 * @v_opcode: Opcode for the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg format against struct for each opcode
 */
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			    u8 *msg, u16 msglen)
{
	bool err_msg_format = FALSE;
	int valid_len = 0;

	/* Validate message length. */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct virtchnl_version_info);
		break;
	case VIRTCHNL_OP_RESET_VF:
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		if (VF_IS_V11(ver))
			valid_len = sizeof(u32);
		break;
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct virtchnl_txq_info);
		break;
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct virtchnl_rxq_info);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct virtchnl_vsi_queue_config_info *vqc =
			    (struct virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = TRUE;
		}
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct virtchnl_irq_map_info *vimi =
			    (struct virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = TRUE;
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
	case VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		valid_len = sizeof(struct virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct virtchnl_ether_addr_list *veal =
			    (struct virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = TRUE;
		}
		break;
	case VIRTCHNL_OP_ADD_VLAN:
	case VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list *vfl =
			    (struct virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = TRUE;
		}
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct virtchnl_promisc_info);
		break;
	case VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_IWARP:
		/* These messages are opaque to us and will be validated in
		 * the RDMA client code. We just need to check for nonzero
		 * length. The firmware will enforce max length restrictions.
		 */
		if (msglen)
			valid_len = msglen;
		else
			err_msg_format = TRUE;
		break;
	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
		if (msglen >= valid_len) {
			struct virtchnl_iwarp_qvlist_info *qv =
			    (struct virtchnl_iwarp_qvlist_info *)msg;
			if (qv->num_vectors == 0) {
				err_msg_format = TRUE;
				break;
			}
			valid_len += ((qv->num_vectors - 1) *
				      sizeof(struct virtchnl_iwarp_qv_info));
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		valid_len = sizeof(struct virtchnl_rss_key);
		if (msglen >= valid_len) {
			struct virtchnl_rss_key *vrk =
			    (struct virtchnl_rss_key *)msg;
			valid_len += vrk->key_len - 1;
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		valid_len = sizeof(struct virtchnl_rss_lut);
		if (msglen >= valid_len) {
			struct virtchnl_rss_lut *vrl =
			    (struct virtchnl_rss_lut *)msg;
			valid_len += vrl->lut_entries - 1;
		}
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		valid_len = sizeof(struct virtchnl_rss_hena);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		valid_len = sizeof(struct virtchnl_vf_res_request);
		break;
	/* These are always errors coming from the VF. */
	case VIRTCHNL_OP_EVENT:
	case VIRTCHNL_OP_UNKNOWN:
	default:
		return VIRTCHNL_ERR_PARAM;
	}
	/* few more checks */
	if (err_msg_format || valid_len != msglen)
		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;

	return 0;
}

#endif /* _VIRTCHNL_H_ */
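/*
 * [Editor's usage sketch, appended after the header; not part of this
 * commit] On the PF side, each VF message is length-checked before it is
 * acted on.  The pf_process_msg() wrapper below is hypothetical; only
 * virtchnl_vc_validate_vf_msg() comes from this header.
 */
static inline int
pf_process_msg(struct virtchnl_version_info *vf_ver, u32 v_opcode,
    u8 *msg, u16 msglen)
{
	int err = virtchnl_vc_validate_vf_msg(vf_ver, v_opcode, msg, msglen);

	if (err != 0)
		return (err);	/* ERR_PARAM or OPCODE_MISMATCH */

	/* ... dispatch on v_opcode ... */
	return (0);
}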