Index: releng/11.3/sys/dev/ixl/i40e_adminq.c
===================================================================
--- releng/11.3/sys/dev/ixl/i40e_adminq.c	(revision 349180)
+++ releng/11.3/sys/dev/ixl/i40e_adminq.c	(revision 349181)
@@ -1,1103 +1,1128 @@
 /******************************************************************************
 
-  Copyright (c) 2013-2017, Intel Corporation
+  Copyright (c) 2013-2019, Intel Corporation
   All rights reserved.
-  
+
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
 
   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
 
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
 
   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.
 
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
 ******************************************************************************/
 /*$FreeBSD$*/
 
 #include "i40e_status.h"
 #include "i40e_type.h"
 #include "i40e_register.h"
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
 /**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
  *
  *  This assumes the alloc_asq and alloc_arq functions have already been called
  **/
 static void i40e_adminq_init_regs(struct i40e_hw *hw)
 {
 	/* set head and tail registers in our local struct */
 	if (i40e_is_vf(hw)) {
 		hw->aq.asq.tail = I40E_VF_ATQT1;
 		hw->aq.asq.head = I40E_VF_ATQH1;
 		hw->aq.asq.len = I40E_VF_ATQLEN1;
 		hw->aq.asq.bal = I40E_VF_ATQBAL1;
 		hw->aq.asq.bah = I40E_VF_ATQBAH1;
 		hw->aq.arq.tail = I40E_VF_ARQT1;
 		hw->aq.arq.head = I40E_VF_ARQH1;
 		hw->aq.arq.len = I40E_VF_ARQLEN1;
 		hw->aq.arq.bal = I40E_VF_ARQBAL1;
 		hw->aq.arq.bah = I40E_VF_ARQBAH1;
 	} else {
 		hw->aq.asq.tail = I40E_PF_ATQT;
 		hw->aq.asq.head = I40E_PF_ATQH;
 		hw->aq.asq.len = I40E_PF_ATQLEN;
 		hw->aq.asq.bal = I40E_PF_ATQBAL;
 		hw->aq.asq.bah = I40E_PF_ATQBAH;
 		hw->aq.arq.tail = I40E_PF_ARQT;
 		hw->aq.arq.head = I40E_PF_ARQH;
 		hw->aq.arq.len = I40E_PF_ARQLEN;
 		hw->aq.arq.bal = I40E_PF_ARQBAL;
 		hw->aq.arq.bah = I40E_PF_ARQBAH;
 	}
 }
 
 /**
  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
  *  @hw: pointer to the hardware structure
  **/
 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code;
 
 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
 					 i40e_mem_atq_ring,
 					 (hw->aq.num_asq_entries *
 					 sizeof(struct i40e_aq_desc)),
 					 I40E_ADMINQ_DESC_ALIGNMENT);
 	if (ret_code)
 		return ret_code;
 
 	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
 					  (hw->aq.num_asq_entries *
 					  sizeof(struct i40e_asq_cmd_details)));
 	if (ret_code) {
 		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 		return ret_code;
 	}
 
 	return ret_code;
 }
 
 /**
  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
  *  @hw: pointer to the hardware structure
  **/
 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code;
 
 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
 					 i40e_mem_arq_ring,
 					 (hw->aq.num_arq_entries *
 					 sizeof(struct i40e_aq_desc)),
 					 I40E_ADMINQ_DESC_ALIGNMENT);
 
 	return ret_code;
 }
 
 /**
  *  i40e_free_adminq_asq - Free Admin Queue send rings
  *  @hw: pointer to the hardware structure
  *
  *  This assumes the posted send buffers have already been cleaned
  *  and de-allocated
  **/
 void i40e_free_adminq_asq(struct i40e_hw *hw)
 {
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
 	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 }
 
 /**
  *  i40e_free_adminq_arq - Free Admin Queue receive rings
  *  @hw: pointer to the hardware structure
  *
  *  This assumes the posted receive buffers have already been cleaned
  *  and de-allocated
  **/
 void i40e_free_adminq_arq(struct i40e_hw *hw)
 {
 	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
 }
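/*
 * A standalone sketch (not part of the patch) of the sizing arithmetic the
 * ring allocators above perform: the descriptor ring occupies
 * num_*_entries * sizeof(struct i40e_aq_desc) bytes of DMA memory, aligned
 * to I40E_ADMINQ_DESC_ALIGNMENT.  The 32-byte descriptor size follows from
 * struct i40e_aq_desc in i40e_adminq_cmd.h below; the queue depth here is
 * illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned num_asq_entries = 64;	/* illustrative queue depth */
	unsigned desc_size = 32;	/* 4 x __le16 + 2 x __le32 + 16-byte params union */
	unsigned alignment = 4096;	/* I40E_ADMINQ_DESC_ALIGNMENT */

	/* 64 descriptors -> 2048 bytes, carved from a 4 KB-aligned region */
	printf("ring bytes = %u, alignment = %u\n",
	    num_asq_entries * desc_size, alignment);
	return 0;
}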
 /**
  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
  *  @hw: pointer to the hardware structure
  **/
 static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code;
 	struct i40e_aq_desc *desc;
 	struct i40e_dma_mem *bi;
 	int i;
 
 	/* We'll be allocating the buffer info memory first, then we can
 	 * allocate the mapped buffers for the event processing
 	 */
 
 	/* buffer_info structures do not need alignment */
 	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
 		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_arq_bufs;
 	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
 
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_arq_entries; i++) {
 		bi = &hw->aq.arq.r.arq_bi[i];
 		ret_code = i40e_allocate_dma_mem(hw, bi,
 						 i40e_mem_arq_buf,
 						 hw->aq.arq_buf_size,
 						 I40E_ADMINQ_DESC_ALIGNMENT);
 		if (ret_code)
 			goto unwind_alloc_arq_bufs;
 
 		/* now configure the descriptors for use */
 		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
 
 		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
 		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
 			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
 		desc->opcode = 0;
 		/* This is in accordance with Admin queue design, there is no
 		 * register for buffer size configuration
 		 */
 		desc->datalen = CPU_TO_LE16((u16)bi->size);
 		desc->retval = 0;
 		desc->cookie_high = 0;
 		desc->cookie_low = 0;
 		desc->params.external.addr_high =
 			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
 		desc->params.external.addr_low =
 			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
 		desc->params.external.param0 = 0;
 		desc->params.external.param1 = 0;
 	}
 
 alloc_arq_bufs:
 	return ret_code;
 
 unwind_alloc_arq_bufs:
 	/* don't try to free the one that failed... */
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
 	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 
 	return ret_code;
 }
 
 /**
  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
  *  @hw: pointer to the hardware structure
  **/
 static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code;
 	struct i40e_dma_mem *bi;
 	int i;
 
 	/* No mapped memory needed yet, just the buffer info structures */
 	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
 		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_asq_bufs;
 	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
 
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_asq_entries; i++) {
 		bi = &hw->aq.asq.r.asq_bi[i];
 		ret_code = i40e_allocate_dma_mem(hw, bi,
 						 i40e_mem_asq_buf,
 						 hw->aq.asq_buf_size,
 						 I40E_ADMINQ_DESC_ALIGNMENT);
 		if (ret_code)
 			goto unwind_alloc_asq_bufs;
 	}
 alloc_asq_bufs:
 	return ret_code;
 
 unwind_alloc_asq_bufs:
 	/* don't try to free the one that failed... */
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
 	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 
 	return ret_code;
 }
 /**
  *  i40e_free_arq_bufs - Free receive queue buffer info elements
  *  @hw: pointer to the hardware structure
  **/
 static void i40e_free_arq_bufs(struct i40e_hw *hw)
 {
 	int i;
 
 	/* free descriptors */
 	for (i = 0; i < hw->aq.num_arq_entries; i++)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
 
 	/* free the descriptor memory */
 	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
 
 	/* free the dma header */
 	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 }
 
 /**
  *  i40e_free_asq_bufs - Free send queue buffer info elements
  *  @hw: pointer to the hardware structure
  **/
 static void i40e_free_asq_bufs(struct i40e_hw *hw)
 {
 	int i;
 
 	/* only unmap if the address is non-NULL */
 	for (i = 0; i < hw->aq.num_asq_entries; i++)
 		if (hw->aq.asq.r.asq_bi[i].pa)
 			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
 
 	/* free the buffer info list */
 	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
 
 	/* free the descriptor memory */
 	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 
 	/* free the dma header */
 	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 }
 
 /**
  *  i40e_config_asq_regs - configure ASQ registers
  *  @hw: pointer to the hardware structure
  *
  *  Configure base address and length registers for the transmit queue
  **/
 static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 	u32 reg = 0;
 
 	/* Clear Head and Tail */
 	wr32(hw, hw->aq.asq.head, 0);
 	wr32(hw, hw->aq.asq.tail, 0);
 
 	/* set starting point */
 	if (!i40e_is_vf(hw))
 		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
 					  I40E_PF_ATQLEN_ATQENABLE_MASK));
 	if (i40e_is_vf(hw))
 		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
 					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
 	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
 	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
 
 	/* Check one register to verify that config was applied */
 	reg = rd32(hw, hw->aq.asq.bal);
 	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
 		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
 	return ret_code;
 }
 
 /**
  *  i40e_config_arq_regs - ARQ register configuration
  *  @hw: pointer to the hardware structure
  *
  * Configure base address and length registers for the receive (event queue)
  **/
 static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 	u32 reg = 0;
 
 	/* Clear Head and Tail */
 	wr32(hw, hw->aq.arq.head, 0);
 	wr32(hw, hw->aq.arq.tail, 0);
 
 	/* set starting point */
 	if (!i40e_is_vf(hw))
 		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
 					  I40E_PF_ARQLEN_ARQENABLE_MASK));
 	if (i40e_is_vf(hw))
 		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
 					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
 	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
 	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
 
 	/* Update tail in the HW to post pre-allocated buffers */
 	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
 
 	/* Check one register to verify that config was applied */
 	reg = rd32(hw, hw->aq.arq.bal);
 	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
 		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
 	return ret_code;
 }
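/*
 * A worked example (not part of the patch) of the register value the two
 * config routines above compose: the queue depth is OR'd with the queue
 * enable mask before being written to the LEN register.  The depth is
 * illustrative, and the enable bit being bit 31 is an assumption based on
 * I40E_PF_ATQLEN_ATQENABLE_MASK in i40e_register.h.
 */
#include <stdio.h>

int main(void)
{
	unsigned num_asq_entries = 64;		/* illustrative depth */
	unsigned atqenable_mask = 1u << 31;	/* assumed enable bit */

	/* what i40e_config_asq_regs() would write: 0x80000040 */
	printf("ATQLEN = 0x%08X\n", num_asq_entries | atqenable_mask);
	return 0;
}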
 /**
  *  i40e_init_asq - main initialization routine for ASQ
  *  @hw: pointer to the hardware structure
  *
  *  This is the main initialization routine for the Admin Send Queue
  *  Prior to calling this function, drivers *MUST* set the following fields
  *  in the hw->aq structure:
  *     - hw->aq.num_asq_entries
  *     - hw->aq.asq_buf_size
  *
  *  Do *NOT* hold the lock when calling this as the memory allocation routines
  *  called are not going to be atomic context safe
  **/
 enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
 	if (hw->aq.asq.count > 0) {
 		/* queue already initialized */
 		ret_code = I40E_ERR_NOT_READY;
 		goto init_adminq_exit;
 	}
 
 	/* verify input for valid configuration */
 	if ((hw->aq.num_asq_entries == 0) ||
 	    (hw->aq.asq_buf_size == 0)) {
 		ret_code = I40E_ERR_CONFIG;
 		goto init_adminq_exit;
 	}
 
 	hw->aq.asq.next_to_use = 0;
 	hw->aq.asq.next_to_clean = 0;
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_asq_ring(hw);
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_exit;
 
 	/* allocate buffers in the rings */
 	ret_code = i40e_alloc_asq_bufs(hw);
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_rings;
 
 	/* initialize base registers */
 	ret_code = i40e_config_asq_regs(hw);
 	if (ret_code != I40E_SUCCESS)
-		goto init_adminq_free_rings;
+		goto init_config_regs;
 
 	/* success! */
 	hw->aq.asq.count = hw->aq.num_asq_entries;
 	goto init_adminq_exit;
 
 init_adminq_free_rings:
 	i40e_free_adminq_asq(hw);
+	return ret_code;
 
+init_config_regs:
+	i40e_free_asq_bufs(hw);
+
 init_adminq_exit:
 	return ret_code;
 }
 /**
  *  i40e_init_arq - initialize ARQ
  *  @hw: pointer to the hardware structure
  *
  *  The main initialization routine for the Admin Receive (Event) Queue.
  *  Prior to calling this function, drivers *MUST* set the following fields
  *  in the hw->aq structure:
  *     - hw->aq.num_arq_entries
  *     - hw->aq.arq_buf_size
  *
  *  Do *NOT* hold the lock when calling this as the memory allocation routines
  *  called are not going to be atomic context safe
  **/
 enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
 	if (hw->aq.arq.count > 0) {
 		/* queue already initialized */
 		ret_code = I40E_ERR_NOT_READY;
 		goto init_adminq_exit;
 	}
 
 	/* verify input for valid configuration */
 	if ((hw->aq.num_arq_entries == 0) ||
 	    (hw->aq.arq_buf_size == 0)) {
 		ret_code = I40E_ERR_CONFIG;
 		goto init_adminq_exit;
 	}
 
 	hw->aq.arq.next_to_use = 0;
 	hw->aq.arq.next_to_clean = 0;
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_arq_ring(hw);
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_exit;
 
 	/* allocate buffers in the rings */
 	ret_code = i40e_alloc_arq_bufs(hw);
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_rings;
 
 	/* initialize base registers */
 	ret_code = i40e_config_arq_regs(hw);
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_rings;
 
 	/* success! */
 	hw->aq.arq.count = hw->aq.num_arq_entries;
 	goto init_adminq_exit;
 
 init_adminq_free_rings:
 	i40e_free_adminq_arq(hw);
 
 init_adminq_exit:
 	return ret_code;
 }
 
 /**
  *  i40e_shutdown_asq - shutdown the ASQ
  *  @hw: pointer to the hardware structure
  *
  *  The main shutdown routine for the Admin Send Queue
  **/
 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
 	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
 
 	if (hw->aq.asq.count == 0) {
 		ret_code = I40E_ERR_NOT_READY;
 		goto shutdown_asq_out;
 	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.asq.head, 0);
 	wr32(hw, hw->aq.asq.tail, 0);
 	wr32(hw, hw->aq.asq.len, 0);
 	wr32(hw, hw->aq.asq.bal, 0);
 	wr32(hw, hw->aq.asq.bah, 0);
 
 	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
 
 shutdown_asq_out:
 	i40e_release_spinlock(&hw->aq.asq_spinlock);
 	return ret_code;
 }
 
 /**
  *  i40e_shutdown_arq - shutdown ARQ
  *  @hw: pointer to the hardware structure
  *
  *  The main shutdown routine for the Admin Receive Queue
  **/
 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
 	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
 
 	if (hw->aq.arq.count == 0) {
 		ret_code = I40E_ERR_NOT_READY;
 		goto shutdown_arq_out;
 	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.arq.head, 0);
 	wr32(hw, hw->aq.arq.tail, 0);
 	wr32(hw, hw->aq.arq.len, 0);
 	wr32(hw, hw->aq.arq.bal, 0);
 	wr32(hw, hw->aq.arq.bah, 0);
 
 	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
 
 shutdown_arq_out:
 	i40e_release_spinlock(&hw->aq.arq_spinlock);
 	return ret_code;
 }
 
 /**
  *  i40e_resume_aq - resume AQ processing from 0
  *  @hw: pointer to the hardware structure
  **/
 static void i40e_resume_aq(struct i40e_hw *hw)
 {
 	/* Registers are reset after PF reset */
 	hw->aq.asq.next_to_use = 0;
 	hw->aq.asq.next_to_clean = 0;
 
 	i40e_config_asq_regs(hw);
 
 	hw->aq.arq.next_to_use = 0;
 	hw->aq.arq.next_to_clean = 0;
 
 	i40e_config_arq_regs(hw);
 }
 /**
  *  i40e_init_adminq - main initialization routine for Admin Queue
  *  @hw: pointer to the hardware structure
  *
  *  Prior to calling this function, drivers *MUST* set the following fields
  *  in the hw->aq structure:
  *     - hw->aq.num_asq_entries
  *     - hw->aq.num_arq_entries
  *     - hw->aq.arq_buf_size
  *     - hw->aq.asq_buf_size
  **/
 enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
 {
+	struct i40e_adminq_info *aq = &hw->aq;
+	enum i40e_status_code ret_code;
 	u16 cfg_ptr, oem_hi, oem_lo;
 	u16 eetrack_lo, eetrack_hi;
-	enum i40e_status_code ret_code;
 	int retry = 0;
 
 	/* verify input for valid configuration */
-	if ((hw->aq.num_arq_entries == 0) ||
-	    (hw->aq.num_asq_entries == 0) ||
-	    (hw->aq.arq_buf_size == 0) ||
-	    (hw->aq.asq_buf_size == 0)) {
+	if (aq->num_arq_entries == 0 ||
+	    aq->num_asq_entries == 0 ||
+	    aq->arq_buf_size == 0 ||
+	    aq->asq_buf_size == 0) {
 		ret_code = I40E_ERR_CONFIG;
 		goto init_adminq_exit;
 	}
-	i40e_init_spinlock(&hw->aq.asq_spinlock);
-	i40e_init_spinlock(&hw->aq.arq_spinlock);
+	i40e_init_spinlock(&aq->asq_spinlock);
+	i40e_init_spinlock(&aq->arq_spinlock);
 
 	/* Set up register offsets */
 	i40e_adminq_init_regs(hw);
 
 	/* setup ASQ command write back timeout */
 	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
 
 	/* allocate the ASQ */
 	ret_code = i40e_init_asq(hw);
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_destroy_spinlocks;
 
 	/* allocate the ARQ */
 	ret_code = i40e_init_arq(hw);
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_asq;
 
 	/* VF has no need of firmware */
 	if (i40e_is_vf(hw))
 		goto init_adminq_exit;
 	/* There are some cases where the firmware may not be quite ready
 	 * for AdminQ operations, so we retry the AdminQ setup a few times
 	 * if we see timeouts in this first AQ call.
 	 */
 	do {
 		ret_code = i40e_aq_get_firmware_version(hw,
-							&hw->aq.fw_maj_ver,
-							&hw->aq.fw_min_ver,
-							&hw->aq.fw_build,
-							&hw->aq.api_maj_ver,
-							&hw->aq.api_min_ver,
+							&aq->fw_maj_ver,
+							&aq->fw_min_ver,
+							&aq->fw_build,
+							&aq->api_maj_ver,
+							&aq->api_min_ver,
 							NULL);
 		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
 			break;
 		retry++;
 		i40e_msec_delay(100);
 		i40e_resume_aq(hw);
 	} while (retry < 10);
 	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_arq;
 
 	/* get the NVM version info */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
 			   &hw->nvm.version);
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
 	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
 	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
 	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
 			   &oem_hi);
 	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
 			   &oem_lo);
 	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
-
-	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
-	if ((hw->aq.api_maj_ver > 1) ||
-	    ((hw->aq.api_maj_ver == 1) &&
-	     (hw->aq.api_min_ver >= 7)))
-		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
-
-	if (hw->mac.type == I40E_MAC_XL710 &&
-	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
-	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
-		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+	/*
+	 * Some features were introduced in different FW API version
+	 * for different MAC type.
+	 */
+	switch (hw->mac.type) {
+	case I40E_MAC_XL710:
+		if (aq->api_maj_ver > 1 ||
+		    (aq->api_maj_ver == 1 &&
+		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+			/* The ability to RX (not drop) 802.1ad frames */
+			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+		}
+		break;
+	case I40E_MAC_X722:
+		if (aq->api_maj_ver > 1 ||
+		    (aq->api_maj_ver == 1 &&
+		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+		/* fall through */
+	default:
+		break;
 	}
 
 	/* Newer versions of firmware require lock when reading the NVM */
-	if ((hw->aq.api_maj_ver > 1) ||
-	    ((hw->aq.api_maj_ver == 1) &&
-	     (hw->aq.api_min_ver >= 5)))
+	if (aq->api_maj_ver > 1 ||
+	    (aq->api_maj_ver == 1 &&
+	     aq->api_min_ver >= 5))
 		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
 
-	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+	if (aq->api_maj_ver > 1 ||
+	    (aq->api_maj_ver == 1 &&
+	     aq->api_min_ver >= 8))
+		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+
+	if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
 		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
 		goto init_adminq_free_arq;
 	}
 
 	/* pre-emptive resource lock release */
 	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
 	hw->nvm_release_on_done = FALSE;
 	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
 	ret_code = I40E_SUCCESS;
 
 	/* success! */
 	goto init_adminq_exit;
 
 init_adminq_free_arq:
 	i40e_shutdown_arq(hw);
 init_adminq_free_asq:
 	i40e_shutdown_asq(hw);
 init_adminq_destroy_spinlocks:
-	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
-	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+	i40e_destroy_spinlock(&aq->asq_spinlock);
+	i40e_destroy_spinlock(&aq->arq_spinlock);
 
 init_adminq_exit:
 	return ret_code;
 }
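/*
 * A minimal usage sketch (hypothetical helper, not from this patch) showing
 * the contract stated in the comment above: the four hw->aq sizing fields
 * must be filled in before i40e_init_adminq() is called.  The depths and
 * buffer sizes below are illustrative, not the values the ixl driver uses.
 */
static enum i40e_status_code
example_adminq_bring_up(struct i40e_hw *hw)
{
	hw->aq.num_asq_entries = 64;	/* send queue depth */
	hw->aq.num_arq_entries = 64;	/* receive queue depth */
	hw->aq.asq_buf_size = 512;	/* indirect command buffer bytes */
	hw->aq.arq_buf_size = 512;	/* event buffer bytes */

	/* pairs with i40e_shutdown_adminq() on detach */
	return i40e_init_adminq(hw);
}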
 /**
  *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
  *  @hw: pointer to the hardware structure
  **/
 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
 {
 	enum i40e_status_code ret_code = I40E_SUCCESS;
 
 	if (i40e_check_asq_alive(hw))
 		i40e_aq_queue_shutdown(hw, TRUE);
 
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
 	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
 	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
 
 	if (hw->nvm_buff.va)
 		i40e_free_virt_mem(hw, &hw->nvm_buff);
 
 	return ret_code;
 }
 
 /**
  *  i40e_clean_asq - cleans Admin send queue
  *  @hw: pointer to the hardware structure
  *
  *  returns the number of free desc
  **/
 u16 i40e_clean_asq(struct i40e_hw *hw)
 {
 	struct i40e_adminq_ring *asq = &(hw->aq.asq);
 	struct i40e_asq_cmd_details *details;
 	u16 ntc = asq->next_to_clean;
 	struct i40e_aq_desc desc_cb;
 	struct i40e_aq_desc *desc;
 
 	desc = I40E_ADMINQ_DESC(*asq, ntc);
 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
 	while (rd32(hw, hw->aq.asq.head) != ntc) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
 			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
 
 		if (details->callback) {
 			I40E_ADMINQ_CALLBACK cb_func =
 					(I40E_ADMINQ_CALLBACK)details->callback;
 			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
 				    I40E_DMA_TO_DMA);
 			cb_func(hw, &desc_cb);
 		}
 		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
 		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
 		ntc++;
 		if (ntc == asq->count)
 			ntc = 0;
 		desc = I40E_ADMINQ_DESC(*asq, ntc);
 		details = I40E_ADMINQ_DETAILS(*asq, ntc);
 	}
 
 	asq->next_to_clean = ntc;
 
 	return I40E_DESC_UNUSED(asq);
 }
 
 /**
  *  i40e_asq_done - check if FW has processed the Admin Send Queue
  *  @hw: pointer to the hw struct
  *
  *  Returns TRUE if the firmware has processed all descriptors on the
  *  admin send queue.  Returns FALSE if there are still requests pending.
  **/
 bool i40e_asq_done(struct i40e_hw *hw)
 {
 	/* AQ designers suggest use of head for better
 	 * timing reliability than DD bit
 	 */
 	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
 }
 /**
  *  i40e_asq_send_command - send command to Admin Queue
  *  @hw: pointer to the hw struct
  *  @desc: prefilled descriptor describing the command (non DMA mem)
  *  @buff: buffer to use for indirect commands
  *  @buff_size: size of buffer for indirect commands
  *  @cmd_details: pointer to command details structure
  *
  *  This is the main send command driver routine for the Admin Queue send
  *  queue.  It runs the queue, cleans the queue, etc
  **/
 enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
 				struct i40e_aq_desc *desc,
 				void *buff, /* can be NULL */
 				u16  buff_size,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	enum i40e_status_code status = I40E_SUCCESS;
 	struct i40e_dma_mem *dma_buff = NULL;
 	struct i40e_asq_cmd_details *details;
 	struct i40e_aq_desc *desc_on_ring;
 	bool cmd_completed = FALSE;
 	u16  retval = 0;
 	u32  val = 0;
 
 	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
 
 	hw->aq.asq_last_status = I40E_AQ_RC_OK;
 
 	if (hw->aq.asq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Admin queue not initialized.\n");
 		status = I40E_ERR_QUEUE_EMPTY;
 		goto asq_send_command_error;
 	}
 
 	val = rd32(hw, hw->aq.asq.head);
 	if (val >= hw->aq.num_asq_entries) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: head overrun at %d\n", val);
-		status = I40E_ERR_QUEUE_EMPTY;
+		status = I40E_ERR_ADMIN_QUEUE_FULL;
 		goto asq_send_command_error;
 	}
 
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
 	if (cmd_details) {
 		i40e_memcpy(details,
 			    cmd_details,
 			    sizeof(struct i40e_asq_cmd_details),
 			    I40E_NONDMA_TO_NONDMA);
 
 		/* If the cmd_details are defined copy the cookie.  The
 		 * CPU_TO_LE32 is not needed here because the data is ignored
 		 * by the FW, only used by the driver
 		 */
 		if (details->cookie) {
 			desc->cookie_high =
 				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
 			desc->cookie_low =
 				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
 		}
 	} else {
 		i40e_memset(details, 0,
 			    sizeof(struct i40e_asq_cmd_details),
 			    I40E_NONDMA_MEM);
 	}
 
 	/* clear requested flags and then set additional flags if defined */
 	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
 	desc->flags |= CPU_TO_LE16(details->flags_ena);
 
 	if (buff_size > hw->aq.asq_buf_size) {
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Invalid buffer size: %d.\n",
 			   buff_size);
 		status = I40E_ERR_INVALID_SIZE;
 		goto asq_send_command_error;
 	}
 
 	if (details->postpone && !details->async) {
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Async flag not set along with postpone flag");
 		status = I40E_ERR_PARAM;
 		goto asq_send_command_error;
 	}
 
 	/* call clean and check queue available function to reclaim the
 	 * descriptors that were processed by FW, the function returns the
 	 * number of desc available
 	 */
 	/* the clean function called here could be called in a separate thread
 	 * in case of asynchronous completions
 	 */
 	if (i40e_clean_asq(hw) == 0) {
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Error queue is full.\n");
 		status = I40E_ERR_ADMIN_QUEUE_FULL;
 		goto asq_send_command_error;
 	}
 
 	/* initialize the temp desc pointer with the right desc */
 	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
 
 	/* if the desc is available copy the temp desc to the right place */
 	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
 		    I40E_NONDMA_TO_DMA);
 
 	/* if buff is not NULL assume indirect command */
 	if (buff != NULL) {
 		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
 		/* copy the user buff into the respective DMA buff */
 		i40e_memcpy(dma_buff->va, buff, buff_size,
 			    I40E_NONDMA_TO_DMA);
 		desc_on_ring->datalen = CPU_TO_LE16(buff_size);
 
 		/* Update the address values in the desc with the pa value
 		 * for respective buffer
 		 */
 		desc_on_ring->params.external.addr_high =
 				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
 		desc_on_ring->params.external.addr_low =
 				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
 	}
 
 	/* bump the tail */
-	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
 		      buff, buff_size);
 	(hw->aq.asq.next_to_use)++;
 	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
 		hw->aq.asq.next_to_use = 0;
 	if (!details->postpone)
 		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
 
 	/* if cmd_details are not defined or async flag is not set,
 	 * we need to wait for desc write back
 	 */
 	if (!details->async && !details->postpone) {
 		u32 total_delay = 0;
 
 		do {
 			/* AQ designers suggest use of head for better
 			 * timing reliability than DD bit
 			 */
 			if (i40e_asq_done(hw))
 				break;
 			i40e_usec_delay(50);
 			total_delay += 50;
 		} while (total_delay < hw->aq.asq_cmd_timeout);
 	}
 
 	/* if ready, copy the desc back to temp */
 	if (i40e_asq_done(hw)) {
 		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
 			    I40E_DMA_TO_NONDMA);
 		if (buff != NULL)
 			i40e_memcpy(buff, dma_buff->va, buff_size,
 				    I40E_DMA_TO_NONDMA);
 		retval = LE16_TO_CPU(desc->retval);
 		if (retval != 0) {
 			i40e_debug(hw,
 				   I40E_DEBUG_AQ_MESSAGE,
 				   "AQTX: Command completed with error 0x%X.\n",
 				   retval);
 
 			/* strip off FW internal code */
 			retval &= 0xff;
 		}
 		cmd_completed = TRUE;
 		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
 			status = I40E_SUCCESS;
+		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+			status = I40E_ERR_NOT_READY;
 		else
 			status = I40E_ERR_ADMIN_QUEUE_ERROR;
 		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
 	}
 
-	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
 		   "AQTX: desc and buffer writeback:\n");
 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
 
 	/* save writeback aq if requested */
 	if (details->wb_desc)
 		i40e_memcpy(details->wb_desc, desc_on_ring,
 			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
 
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
 	    (!details->async && !details->postpone)) {
 		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
 			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 				   "AQTX: AQ Critical error.\n");
 			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
 		} else {
 			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 				   "AQTX: Writeback timeout.\n");
 			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
 		}
 	}
 
 asq_send_command_error:
 	i40e_release_spinlock(&hw->aq.asq_spinlock);
 	return status;
 }
 
 /**
  *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
  *  @desc:     pointer to the temp descriptor (non DMA mem)
  *  @opcode:   the opcode can be used to decide which flags to turn off or on
  *
  *  Fill the desc with default values
  **/
 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
 				       u16 opcode)
 {
 	/* zero out the desc */
 	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
 		    I40E_NONDMA_MEM);
 	desc->opcode = CPU_TO_LE16(opcode);
 	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
 }
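/*
 * A sketch (hypothetical helper, not from this patch) tying the two routines
 * above together: fill a direct descriptor, send it, and read the completion
 * that firmware writes back over desc.params.  Get Version (0x0001) is used
 * because its completion struct appears in i40e_adminq_cmd.h below.  With the
 * default I40E_ASQ_CMD_TIMEOUT of 250000 usecs and 50 usec polls, the wait
 * loop in i40e_asq_send_command() spins at most 5000 times.
 */
static enum i40e_status_code
example_get_version(struct i40e_hw *hw, u16 *api_major, u16 *api_minor)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_version *resp =
	    (struct i40e_aqc_get_version *)&desc.params.raw;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
	if (status == I40E_SUCCESS) {
		*api_major = LE16_TO_CPU(resp->api_major);
		*api_minor = LE16_TO_CPU(resp->api_minor);
	}
	return status;
}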
It can also return how many events are * left to process through 'pending' **/ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *pending) { enum i40e_status_code ret_code = I40E_SUCCESS; u16 ntc = hw->aq.arq.next_to_clean; struct i40e_aq_desc *desc; struct i40e_dma_mem *bi; u16 desc_idx; u16 datalen; u16 flags; u16 ntu; /* pre-clean the event info */ i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM); /* take the lock before we start messing with the ring */ i40e_acquire_spinlock(&hw->aq.arq_spinlock); if (hw->aq.arq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: Admin queue not initialized.\n"); ret_code = I40E_ERR_QUEUE_EMPTY; goto clean_arq_element_err; } /* set next_to_use to head */ if (!i40e_is_vf(hw)) ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; else ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } /* now clean the next descriptor */ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); desc_idx = ntc; hw->aq.arq_last_status = (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval); flags = LE16_TO_CPU(desc->flags); if (flags & I40E_AQ_FLAG_ERR) { ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: Event received with error 0x%X.\n", hw->aq.arq_last_status); } i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA); datalen = LE16_TO_CPU(desc->datalen); e->msg_len = min(datalen, e->buf_len); if (e->msg_buf != NULL && (e->msg_len != 0)) i40e_memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, e->msg_len, I40E_DMA_TO_NONDMA); - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message * size */ bi = &hw->aq.arq.r.arq_bi[ntc]; i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM); desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); desc->datalen = CPU_TO_LE16((u16)bi->size); desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa)); desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); /* set tail = the last cleaned desc index. */ wr32(hw, hw->aq.arq.tail, ntc); /* ntc is updated to tail + 1 */ ntc++; if (ntc == hw->aq.num_arq_entries) ntc = 0; hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc); clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); clean_arq_element_err: i40e_release_spinlock(&hw->aq.arq_spinlock); return ret_code; } Index: releng/11.3/sys/dev/ixl/i40e_adminq.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_adminq.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_adminq.h (revision 349181) @@ -1,167 +1,167 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. 
Index: releng/11.3/sys/dev/ixl/i40e_adminq.h
===================================================================
--- releng/11.3/sys/dev/ixl/i40e_adminq.h	(revision 349180)
+++ releng/11.3/sys/dev/ixl/i40e_adminq.h	(revision 349181)
@@ -1,167 +1,167 @@
 /******************************************************************************
 
-  Copyright (c) 2013-2017, Intel Corporation
+  Copyright (c) 2013-2019, Intel Corporation
   All rights reserved.
-  
+
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
 
   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
 
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
 
   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.
 
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
 /*$FreeBSD$*/
 
 #ifndef _I40E_ADMINQ_H_
 #define _I40E_ADMINQ_H_
 
 #include "i40e_osdep.h"
 #include "i40e_status.h"
 #include "i40e_adminq_cmd.h"
 
 #define I40E_ADMINQ_DESC(R, i)   \
 	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
 
 #define I40E_ADMINQ_DESC_ALIGNMENT 4096
 
 struct i40e_adminq_ring {
 	struct i40e_virt_mem dma_head;	/* space for dma structures */
 	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
 	struct i40e_virt_mem cmd_buf;	/* command buffer memory */
 
 	union {
 		struct i40e_dma_mem *asq_bi;
 		struct i40e_dma_mem *arq_bi;
 	} r;
 
 	u16 count;		/* Number of descriptors */
 	u16 rx_buf_len;		/* Admin Receive Queue buffer length */
 
 	/* used for interrupt processing */
 	u16 next_to_use;
 	u16 next_to_clean;
 
 	/* used for queue tracking */
 	u32 head;
 	u32 tail;
 	u32 len;
 	u32 bah;
 	u32 bal;
 };
 
 /* ASQ transaction details */
 struct i40e_asq_cmd_details {
 	void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
 	u64 cookie;
 	u16 flags_ena;
 	u16 flags_dis;
 	bool async;
 	bool postpone;
 	struct i40e_aq_desc *wb_desc;
 };
 
 #define I40E_ADMINQ_DETAILS(R, i)   \
 	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
 
 /* ARQ event information */
 struct i40e_arq_event_info {
 	struct i40e_aq_desc desc;
 	u16 msg_len;
 	u16 buf_len;
 	u8 *msg_buf;
 };
 
 /* Admin Queue information */
 struct i40e_adminq_info {
 	struct i40e_adminq_ring arq;    /* receive queue */
 	struct i40e_adminq_ring asq;    /* send queue */
 	u32 asq_cmd_timeout;            /* send queue cmd write back timeout*/
 	u16 num_arq_entries;            /* receive queue depth */
 	u16 num_asq_entries;            /* send queue depth */
 	u16 arq_buf_size;               /* receive queue buffer size */
 	u16 asq_buf_size;               /* send queue buffer size */
 	u16 fw_maj_ver;                 /* firmware major version */
 	u16 fw_min_ver;                 /* firmware minor version */
 	u32 fw_build;                   /* firmware build number */
 	u16 api_maj_ver;                /* api major version */
 	u16 api_min_ver;                /* api minor version */
 
 	struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
 	struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
 
 	/* last status values on send and receive queues */
 	enum i40e_admin_queue_err asq_last_status;
 	enum i40e_admin_queue_err arq_last_status;
 };
 /**
  * i40e_aq_rc_to_posix - convert errors to user-land codes
  * aq_ret: AdminQ handler error code can override aq_rc
  * aq_rc: AdminQ firmware error code to convert
  **/
 static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 {
 	int aq_to_posix[] = {
 		0,           /* I40E_AQ_RC_OK */
 		-EPERM,      /* I40E_AQ_RC_EPERM */
 		-ENOENT,     /* I40E_AQ_RC_ENOENT */
 		-ESRCH,      /* I40E_AQ_RC_ESRCH */
 		-EINTR,      /* I40E_AQ_RC_EINTR */
 		-EIO,        /* I40E_AQ_RC_EIO */
 		-ENXIO,      /* I40E_AQ_RC_ENXIO */
 		-E2BIG,      /* I40E_AQ_RC_E2BIG */
 		-EAGAIN,     /* I40E_AQ_RC_EAGAIN */
 		-ENOMEM,     /* I40E_AQ_RC_ENOMEM */
 		-EACCES,     /* I40E_AQ_RC_EACCES */
 		-EFAULT,     /* I40E_AQ_RC_EFAULT */
 		-EBUSY,      /* I40E_AQ_RC_EBUSY */
 		-EEXIST,     /* I40E_AQ_RC_EEXIST */
 		-EINVAL,     /* I40E_AQ_RC_EINVAL */
 		-ENOTTY,     /* I40E_AQ_RC_ENOTTY */
 		-ENOSPC,     /* I40E_AQ_RC_ENOSPC */
 		-ENOSYS,     /* I40E_AQ_RC_ENOSYS */
 		-ERANGE,     /* I40E_AQ_RC_ERANGE */
 		-EPIPE,      /* I40E_AQ_RC_EFLUSHED */
 		-ESPIPE,     /* I40E_AQ_RC_BAD_ADDR */
 		-EROFS,      /* I40E_AQ_RC_EMODE */
 		-EFBIG,      /* I40E_AQ_RC_EFBIG */
 	};
 
 	/* aq_rc is invalid if AQ timed out */
 	if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
 		return -EAGAIN;
 
 	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
 		return -ERANGE;
 
 	return aq_to_posix[aq_rc];
 }
 
 /* general information */
 #define I40E_AQ_LARGE_BUF	512
 #define I40E_ASQ_CMD_TIMEOUT	250000  /* usecs */
 
 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
 				       u16 opcode);
 
 #endif /* _I40E_ADMINQ_H_ */
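/*
 * A sketch (hypothetical wrapper, not from this header) of how callers
 * combine the two AQ error domains with i40e_aq_rc_to_posix(): the handler
 * status short-circuits to -EAGAIN on timeout because asq_last_status is
 * meaningless in that case; otherwise the firmware return code is mapped
 * through the table above (e.g. I40E_AQ_RC_EBUSY -> -EBUSY).
 */
static int
example_aq_errno(struct i40e_hw *hw, enum i40e_status_code status)
{
	if (status == I40E_SUCCESS)
		return 0;
	return i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}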
Index: releng/11.3/sys/dev/ixl/i40e_adminq_cmd.h
===================================================================
--- releng/11.3/sys/dev/ixl/i40e_adminq_cmd.h	(revision 349180)
+++ releng/11.3/sys/dev/ixl/i40e_adminq_cmd.h	(revision 349181)
@@ -1,2780 +1,2825 @@
 /******************************************************************************
 
-  Copyright (c) 2013-2017, Intel Corporation
+  Copyright (c) 2013-2019, Intel Corporation
   All rights reserved.
-  
+
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
 
   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
 
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
 
   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.
 
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
 /*$FreeBSD$*/
 
 #ifndef _I40E_ADMINQ_CMD_H_
 #define _I40E_ADMINQ_CMD_H_
 
 /* This header file defines the i40e Admin Queue commands and is shared between
  * i40e Firmware and Software.
  *
  * This file needs to comply with the Linux Kernel coding style.
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR_X722	0x0005
-#define I40E_FW_API_VERSION_MINOR_X710	0x0007
+#define I40E_FW_API_VERSION_MINOR_X722	0x0008
+#define I40E_FW_API_VERSION_MINOR_X710	0x0008
 
 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
 					I40E_FW_API_VERSION_MINOR_X710 : \
 					I40E_FW_API_VERSION_MINOR_X722)
 
 /* API version 1.7 implements additional link and PHY-specific APIs  */
 #define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
 
 struct i40e_aq_desc {
 	__le16 flags;
 	__le16 opcode;
 	__le16 datalen;
 	__le16 retval;
 	__le32 cookie_high;
 	__le32 cookie_low;
 	union {
 		struct {
 			__le32 param0;
 			__le32 param1;
 			__le32 param2;
 			__le32 param3;
 		} internal;
 		struct {
 			__le32 param0;
 			__le32 param1;
 			__le32 addr_high;
 			__le32 addr_low;
 		} external;
 		u8 raw[16];
 	} params;
 };
 
 /* Flags sub-structure
  * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
  * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
  */
 
 /* command flags and offsets*/
 #define I40E_AQ_FLAG_DD_SHIFT	0
 #define I40E_AQ_FLAG_CMP_SHIFT	1
 #define I40E_AQ_FLAG_ERR_SHIFT	2
 #define I40E_AQ_FLAG_VFE_SHIFT	3
 #define I40E_AQ_FLAG_LB_SHIFT	9
 #define I40E_AQ_FLAG_RD_SHIFT	10
 #define I40E_AQ_FLAG_VFC_SHIFT	11
 #define I40E_AQ_FLAG_BUF_SHIFT	12
 #define I40E_AQ_FLAG_SI_SHIFT	13
 #define I40E_AQ_FLAG_EI_SHIFT	14
 #define I40E_AQ_FLAG_FE_SHIFT	15
 
 #define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
 #define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
 #define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
 #define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
 #define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
 #define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
 #define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
 #define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
 #define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
 #define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
 #define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
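/*
 * A standalone illustration (not part of the header) of how the flag bits
 * above compose.  An indirect command with a buffer larger than
 * I40E_AQ_LARGE_BUF carries BUF and LB, and i40e_fill_default_direct_cmd_desc()
 * always sets SI; the values are copied from the defines above.
 */
#include <stdio.h>

#define AQ_FLAG_LB	(1 << 9)	/* I40E_AQ_FLAG_LB,  0x200  */
#define AQ_FLAG_BUF	(1 << 12)	/* I40E_AQ_FLAG_BUF, 0x1000 */
#define AQ_FLAG_SI	(1 << 13)	/* I40E_AQ_FLAG_SI,  0x2000 */

int main(void)
{
	/* large indirect command: 0x1000 | 0x200 | 0x2000 = 0x3200 */
	printf("flags = 0x%04X\n", AQ_FLAG_BUF | AQ_FLAG_LB | AQ_FLAG_SI);
	return 0;
}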
 /* error codes */
 enum i40e_admin_queue_err {
 	I40E_AQ_RC_OK		= 0,  /* success */
 	I40E_AQ_RC_EPERM	= 1,  /* Operation not permitted */
 	I40E_AQ_RC_ENOENT	= 2,  /* No such element */
 	I40E_AQ_RC_ESRCH	= 3,  /* Bad opcode */
 	I40E_AQ_RC_EINTR	= 4,  /* operation interrupted */
 	I40E_AQ_RC_EIO		= 5,  /* I/O error */
 	I40E_AQ_RC_ENXIO	= 6,  /* No such resource */
 	I40E_AQ_RC_E2BIG	= 7,  /* Arg too long */
 	I40E_AQ_RC_EAGAIN	= 8,  /* Try again */
 	I40E_AQ_RC_ENOMEM	= 9,  /* Out of memory */
 	I40E_AQ_RC_EACCES	= 10, /* Permission denied */
 	I40E_AQ_RC_EFAULT	= 11, /* Bad address */
 	I40E_AQ_RC_EBUSY	= 12, /* Device or resource busy */
 	I40E_AQ_RC_EEXIST	= 13, /* object already exists */
 	I40E_AQ_RC_EINVAL	= 14, /* Invalid argument */
 	I40E_AQ_RC_ENOTTY	= 15, /* Not a typewriter */
 	I40E_AQ_RC_ENOSPC	= 16, /* No space left or alloc failure */
 	I40E_AQ_RC_ENOSYS	= 17, /* Function not implemented */
 	I40E_AQ_RC_ERANGE	= 18, /* Parameter out of range */
 	I40E_AQ_RC_EFLUSHED	= 19, /* Cmd flushed due to prev cmd error */
 	I40E_AQ_RC_BAD_ADDR	= 20, /* Descriptor contains a bad pointer */
 	I40E_AQ_RC_EMODE	= 21, /* Op not allowed in current dev mode */
 	I40E_AQ_RC_EFBIG	= 22, /* File too large */
 };
 
 /* Admin Queue command opcodes */
 enum i40e_admin_queue_opc {
 	/* aq commands */
 	i40e_aqc_opc_get_version	= 0x0001,
 	i40e_aqc_opc_driver_version	= 0x0002,
 	i40e_aqc_opc_queue_shutdown	= 0x0003,
 	i40e_aqc_opc_set_pf_context	= 0x0004,
 
 	/* resource ownership */
 	i40e_aqc_opc_request_resource	= 0x0008,
 	i40e_aqc_opc_release_resource	= 0x0009,
 
 	i40e_aqc_opc_list_func_capabilities	= 0x000A,
 	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
 
 	/* Proxy commands */
 	i40e_aqc_opc_set_proxy_config		= 0x0104,
 	i40e_aqc_opc_set_ns_proxy_table_entry	= 0x0105,
 
 	/* LAA */
 	i40e_aqc_opc_mac_address_read	= 0x0107,
 	i40e_aqc_opc_mac_address_write	= 0x0108,
 
 	/* PXE */
 	i40e_aqc_opc_clear_pxe_mode	= 0x0110,
 
 	/* WoL commands */
 	i40e_aqc_opc_set_wol_filter		= 0x0120,
 	i40e_aqc_opc_get_wake_reason		= 0x0121,
 	i40e_aqc_opc_clear_all_wol_filters	= 0x025E,
 
 	/* internal switch commands */
 	i40e_aqc_opc_get_switch_config		= 0x0200,
 	i40e_aqc_opc_add_statistics		= 0x0201,
 	i40e_aqc_opc_remove_statistics		= 0x0202,
 	i40e_aqc_opc_set_port_parameters	= 0x0203,
 	i40e_aqc_opc_get_switch_resource_alloc	= 0x0204,
 	i40e_aqc_opc_set_switch_config		= 0x0205,
 	i40e_aqc_opc_rx_ctl_reg_read		= 0x0206,
 	i40e_aqc_opc_rx_ctl_reg_write		= 0x0207,
 
 	i40e_aqc_opc_add_vsi			= 0x0210,
 	i40e_aqc_opc_update_vsi_parameters	= 0x0211,
 	i40e_aqc_opc_get_vsi_parameters		= 0x0212,
 
 	i40e_aqc_opc_add_pv			= 0x0220,
 	i40e_aqc_opc_update_pv_parameters	= 0x0221,
 	i40e_aqc_opc_get_pv_parameters		= 0x0222,
 
 	i40e_aqc_opc_add_veb			= 0x0230,
 	i40e_aqc_opc_update_veb_parameters	= 0x0231,
 	i40e_aqc_opc_get_veb_parameters		= 0x0232,
 
 	i40e_aqc_opc_delete_element		= 0x0243,
 
 	i40e_aqc_opc_add_macvlan		= 0x0250,
 	i40e_aqc_opc_remove_macvlan		= 0x0251,
 	i40e_aqc_opc_add_vlan			= 0x0252,
 	i40e_aqc_opc_remove_vlan		= 0x0253,
 	i40e_aqc_opc_set_vsi_promiscuous_modes	= 0x0254,
 	i40e_aqc_opc_add_tag			= 0x0255,
 	i40e_aqc_opc_remove_tag			= 0x0256,
 	i40e_aqc_opc_add_multicast_etag		= 0x0257,
 	i40e_aqc_opc_remove_multicast_etag	= 0x0258,
 	i40e_aqc_opc_update_tag			= 0x0259,
 	i40e_aqc_opc_add_control_packet_filter	= 0x025A,
 	i40e_aqc_opc_remove_control_packet_filter	= 0x025B,
 	i40e_aqc_opc_add_cloud_filters		= 0x025C,
 	i40e_aqc_opc_remove_cloud_filters	= 0x025D,
 	i40e_aqc_opc_clear_wol_switch_filters	= 0x025E,
 
 	i40e_aqc_opc_add_mirror_rule	= 0x0260,
 	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
 	/* DCB commands */
 	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
 	i40e_aqc_opc_dcb_updated	= 0x0302,
 	i40e_aqc_opc_set_dcb_parameters	= 0x0303,
 
 	/* TX scheduler */
 	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
 	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit	= 0x0406,
 	i40e_aqc_opc_configure_vsi_tc_bw		= 0x0407,
 	i40e_aqc_opc_query_vsi_bw_config		= 0x0408,
 	i40e_aqc_opc_query_vsi_ets_sla_config		= 0x040A,
 	i40e_aqc_opc_configure_switching_comp_bw_limit	= 0x0410,
 
 	i40e_aqc_opc_enable_switching_comp_ets			= 0x0413,
 	i40e_aqc_opc_modify_switching_comp_ets			= 0x0414,
 	i40e_aqc_opc_disable_switching_comp_ets			= 0x0415,
 	i40e_aqc_opc_configure_switching_comp_ets_bw_limit	= 0x0416,
 	i40e_aqc_opc_configure_switching_comp_bw_config		= 0x0417,
 	i40e_aqc_opc_query_switching_comp_ets_config		= 0x0418,
 	i40e_aqc_opc_query_port_ets_config			= 0x0419,
 	i40e_aqc_opc_query_switching_comp_bw_config		= 0x041A,
 	i40e_aqc_opc_suspend_port_tx				= 0x041B,
 	i40e_aqc_opc_resume_port_tx				= 0x041C,
 	i40e_aqc_opc_configure_partition_bw			= 0x041D,
 	/* hmc */
 	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
 	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
 	/* phy commands*/
 	i40e_aqc_opc_get_phy_abilities		= 0x0600,
 	i40e_aqc_opc_set_phy_config		= 0x0601,
 	i40e_aqc_opc_set_mac_config		= 0x0603,
 	i40e_aqc_opc_set_link_restart_an	= 0x0605,
 	i40e_aqc_opc_get_link_status		= 0x0607,
 	i40e_aqc_opc_set_phy_int_mask		= 0x0613,
 	i40e_aqc_opc_get_local_advt_reg		= 0x0614,
 	i40e_aqc_opc_set_local_advt_reg		= 0x0615,
 	i40e_aqc_opc_get_partner_advt		= 0x0616,
 	i40e_aqc_opc_set_lb_modes		= 0x0618,
 	i40e_aqc_opc_get_phy_wol_caps		= 0x0621,
 	i40e_aqc_opc_set_phy_debug		= 0x0622,
 	i40e_aqc_opc_upload_ext_phy_fm		= 0x0625,
 	i40e_aqc_opc_run_phy_activity		= 0x0626,
 	i40e_aqc_opc_set_phy_register		= 0x0628,
 	i40e_aqc_opc_get_phy_register		= 0x0629,
 
 	/* NVM commands */
 	i40e_aqc_opc_nvm_read			= 0x0701,
 	i40e_aqc_opc_nvm_erase			= 0x0702,
 	i40e_aqc_opc_nvm_update		= 0x0703,
 	i40e_aqc_opc_nvm_config_read		= 0x0704,
 	i40e_aqc_opc_nvm_config_write		= 0x0705,
 	i40e_aqc_opc_nvm_progress		= 0x0706,
 	i40e_aqc_opc_oem_post_update		= 0x0720,
 	i40e_aqc_opc_thermal_sensor		= 0x0721,
 
 	/* virtualization commands */
 	i40e_aqc_opc_send_msg_to_pf		= 0x0801,
 	i40e_aqc_opc_send_msg_to_vf		= 0x0802,
 	i40e_aqc_opc_send_msg_to_peer		= 0x0803,
 
 	/* alternate structure */
 	i40e_aqc_opc_alternate_write		= 0x0900,
 	i40e_aqc_opc_alternate_write_indirect	= 0x0901,
 	i40e_aqc_opc_alternate_read		= 0x0902,
 	i40e_aqc_opc_alternate_read_indirect	= 0x0903,
 	i40e_aqc_opc_alternate_write_done	= 0x0904,
 	i40e_aqc_opc_alternate_set_mode		= 0x0905,
 	i40e_aqc_opc_alternate_clear_port	= 0x0906,
 
 	/* LLDP commands */
 	i40e_aqc_opc_lldp_get_mib	= 0x0A00,
 	i40e_aqc_opc_lldp_update_mib	= 0x0A01,
 	i40e_aqc_opc_lldp_add_tlv	= 0x0A02,
 	i40e_aqc_opc_lldp_update_tlv	= 0x0A03,
 	i40e_aqc_opc_lldp_delete_tlv	= 0x0A04,
 	i40e_aqc_opc_lldp_stop		= 0x0A05,
 	i40e_aqc_opc_lldp_start		= 0x0A06,
 	i40e_aqc_opc_get_cee_dcb_cfg	= 0x0A07,
 	i40e_aqc_opc_lldp_set_local_mib	= 0x0A08,
 	i40e_aqc_opc_lldp_stop_start_spec_agent	= 0x0A09,
+	i40e_aqc_opc_lldp_restore	= 0x0A0A,
 
 	/* Tunnel commands */
 	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
 	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
 	i40e_aqc_opc_set_rss_key	= 0x0B02,
 	i40e_aqc_opc_set_rss_lut	= 0x0B03,
 	i40e_aqc_opc_get_rss_key	= 0x0B04,
 	i40e_aqc_opc_get_rss_lut	= 0x0B05,
 
 	/* Async Events */
 	i40e_aqc_opc_event_lan_overflow		= 0x1001,
 
 	/* OEM commands */
 	i40e_aqc_opc_oem_parameter_change	= 0xFE00,
 	i40e_aqc_opc_oem_device_status_change	= 0xFE01,
 	i40e_aqc_opc_oem_ocsd_initialize	= 0xFE02,
 	i40e_aqc_opc_oem_ocbb_initialize	= 0xFE03,
 
 	/* debug commands */
 	i40e_aqc_opc_debug_read_reg		= 0xFF03,
 	i40e_aqc_opc_debug_write_reg		= 0xFF04,
 	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
 	i40e_aqc_opc_debug_dump_internals	= 0xFF08,
 };
 
 /* command structures and indirect data structures */
 
 /* Structure naming conventions:
  * - no suffix for direct command descriptor structures
  * - _data for indirect sent data
  * - _resp for indirect return data (data which is both will use _data)
  * - _completion for direct return data
  * - _element_ for repeated elements (may also be _data or _resp)
  *
  * Command structures are expected to overlay the params.raw member of the basic
  * descriptor, and as such cannot exceed 16 bytes in length.
  */
 
 /* This macro is used to generate a compilation error if a structure
  * is not exactly the correct length. It gives a divide by zero error if the
  * structure is not of the correct size, otherwise it creates an enum that is
  * never used.
  */
 #define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
 	{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
 
 /* This macro is used extensively to ensure that command structures are 16
  * bytes in length as they have to map to the raw array of that size.
  */
 #define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)
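/*
 * A standalone demonstration (struct and macro names hypothetical) of the
 * compile-time size check above: when the size matches, the divide is by 1
 * and an unused enum is produced; when it does not, the constant expression
 * divides by zero and compilation fails.
 */
#define CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \
	{ static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }

struct demo_cmd { unsigned char raw[16]; };

CHECK_STRUCT_LEN(16, demo_cmd);		/* compiles: sizeof == 16 */
/* CHECK_STRUCT_LEN(24, demo_cmd); */	/* would not compile: (24)/0 */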
 /* internal (0x00XX) commands */
 
 /* Get version (direct 0x0001) */
 struct i40e_aqc_get_version {
 	__le32 rom_ver;
 	__le32 fw_build;
 	__le16 fw_major;
 	__le16 fw_minor;
 	__le16 api_major;
 	__le16 api_minor;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
 
 /* Send driver version (indirect 0x0002) */
 struct i40e_aqc_driver_version {
 	u8	driver_major_ver;
 	u8	driver_minor_ver;
 	u8	driver_build_ver;
 	u8	driver_subbuild_ver;
 	u8	reserved[4];
 	__le32	address_high;
 	__le32	address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
 
 /* Queue Shutdown (direct 0x0003) */
 struct i40e_aqc_queue_shutdown {
 	__le32	driver_unloading;
 #define I40E_AQ_DRIVER_UNLOADING	0x1
 	u8	reserved[12];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
 
 /* Set PF context (0x0004, direct) */
 struct i40e_aqc_set_pf_context {
 	u8	pf_id;
 	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
 
 /* Request resource ownership (direct 0x0008)
  * Release resource ownership (direct 0x0009)
  */
 #define I40E_AQ_RESOURCE_NVM			1
 #define I40E_AQ_RESOURCE_SDP			2
 #define I40E_AQ_RESOURCE_ACCESS_READ		1
 #define I40E_AQ_RESOURCE_ACCESS_WRITE		2
 #define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT	3000
 #define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT	180000
 
 struct i40e_aqc_request_resource {
 	__le16	resource_id;
 	__le16	access_type;
 	__le32	timeout;
 	__le32	resource_number;
 	u8	reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
 
 /* Get function capabilities (indirect 0x000A)
  * Get device capabilities (indirect 0x000B)
  */
 struct i40e_aqc_list_capabilites {
 	u8 command_flags;
 #define I40E_AQ_LIST_CAP_PF_INDEX_EN	1
 	u8 pf_index;
 	u8 reserved[2];
 	__le32 count;
 	__le32 addr_high;
 	__le32 addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
 
 struct i40e_aqc_list_capabilities_element_resp {
 	__le16	id;
 	u8	major_rev;
 	u8	minor_rev;
 	__le32	number;
 	__le32	logical_id;
 	__le32	phys_id;
 	u8	reserved[16];
 };
 
 /* list of caps */
 
 #define I40E_AQ_CAP_ID_SWITCH_MODE	0x0001
 #define I40E_AQ_CAP_ID_MNG_MODE		0x0002
 #define I40E_AQ_CAP_ID_NPAR_ACTIVE	0x0003
 #define I40E_AQ_CAP_ID_OS2BMC_CAP	0x0004
 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID	0x0005
 #define I40E_AQ_CAP_ID_ALTERNATE_RAM	0x0006
 #define I40E_AQ_CAP_ID_WOL_AND_PROXY	0x0008
 #define I40E_AQ_CAP_ID_SRIOV		0x0012
 #define I40E_AQ_CAP_ID_VF		0x0013
 #define I40E_AQ_CAP_ID_VMDQ		0x0014
 #define I40E_AQ_CAP_ID_8021QBG		0x0015
 #define I40E_AQ_CAP_ID_8021QBR		0x0016
 #define I40E_AQ_CAP_ID_VSI		0x0017
 #define I40E_AQ_CAP_ID_DCB		0x0018
 #define I40E_AQ_CAP_ID_FCOE		0x0021
 #define I40E_AQ_CAP_ID_ISCSI		0x0022
 #define I40E_AQ_CAP_ID_RSS		0x0040
 #define I40E_AQ_CAP_ID_RXQ		0x0041
 #define I40E_AQ_CAP_ID_TXQ		0x0042
 #define I40E_AQ_CAP_ID_MSIX		0x0043
 #define I40E_AQ_CAP_ID_VF_MSIX		0x0044
 #define I40E_AQ_CAP_ID_FLOW_DIRECTOR	0x0045
 #define I40E_AQ_CAP_ID_1588		0x0046
 #define I40E_AQ_CAP_ID_IWARP		0x0051
 #define I40E_AQ_CAP_ID_LED		0x0061
 #define I40E_AQ_CAP_ID_SDP		0x0062
 #define I40E_AQ_CAP_ID_MDIO		0x0063
 #define I40E_AQ_CAP_ID_WSR_PROT		0x0064
 #define I40E_AQ_CAP_ID_NVM_MGMT		0x0080
 #define I40E_AQ_CAP_ID_FLEX10		0x00F1
 #define I40E_AQ_CAP_ID_CEM		0x00F2
 
 /* Set CPPM Configuration (direct 0x0103) */
 struct i40e_aqc_cppm_configuration {
 	__le16	command_flags;
 #define I40E_AQ_CPPM_EN_LTRC	0x0800
 #define I40E_AQ_CPPM_EN_DMCTH	0x1000
 #define I40E_AQ_CPPM_EN_DMCTLX	0x2000
 #define I40E_AQ_CPPM_EN_HPTC	0x4000
 #define I40E_AQ_CPPM_EN_DMARC	0x8000
 	__le16	ttlx;
 	__le32	dmacr;
 	__le16	dmcth;
 	u8	hptc;
 	u8	reserved;
 	__le32	pfltrc;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
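/*
 * A sketch (hypothetical wrapper, not from this patch) of the overlay
 * convention described above: a direct command's struct is laid over
 * desc.params.raw, so building a Queue Shutdown request looks like this.
 */
static enum i40e_status_code
example_queue_shutdown(struct i40e_hw *hw, bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
	    (struct i40e_aqc_queue_shutdown *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
	if (unloading)
		cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
	return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}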
 /* Set ARP Proxy command / response (indirect 0x0104) */
 struct i40e_aqc_arp_proxy_data {
 	__le16	command_flags;
 #define I40E_AQ_ARP_INIT_IPV4	0x0800
 #define I40E_AQ_ARP_UNSUP_CTL	0x1000
 #define I40E_AQ_ARP_ENA		0x2000
 #define I40E_AQ_ARP_ADD_IPV4	0x4000
 #define I40E_AQ_ARP_DEL_IPV4	0x8000
 	__le16	table_id;
 	__le32	enabled_offloads;
 #define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE	0x00000020
 #define I40E_AQ_ARP_OFFLOAD_ENABLE		0x00000800
 	__le32	ip_addr;
 	u8	mac_addr[6];
 	u8	reserved[2];
 };
 
 I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
 
 /* Set NS Proxy Table Entry Command (indirect 0x0105) */
 struct i40e_aqc_ns_proxy_data {
 	__le16	table_idx_mac_addr_0;
 	__le16	table_idx_mac_addr_1;
 	__le16	table_idx_ipv6_0;
 	__le16	table_idx_ipv6_1;
 	__le16	control;
 #define I40E_AQ_NS_PROXY_ADD_0		0x0001
 #define I40E_AQ_NS_PROXY_DEL_0		0x0002
 #define I40E_AQ_NS_PROXY_ADD_1		0x0004
 #define I40E_AQ_NS_PROXY_DEL_1		0x0008
 #define I40E_AQ_NS_PROXY_ADD_IPV6_0	0x0010
 #define I40E_AQ_NS_PROXY_DEL_IPV6_0	0x0020
 #define I40E_AQ_NS_PROXY_ADD_IPV6_1	0x0040
 #define I40E_AQ_NS_PROXY_DEL_IPV6_1	0x0080
 #define I40E_AQ_NS_PROXY_COMMAND_SEQ	0x0100
 #define I40E_AQ_NS_PROXY_INIT_IPV6_TBL	0x0200
 #define I40E_AQ_NS_PROXY_INIT_MAC_TBL	0x0400
 #define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE	0x0800
 #define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE	0x1000
 	u8	mac_addr_0[6];
 	u8	mac_addr_1[6];
 	u8	local_mac_addr[6];
 	u8	ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
 	u8	ipv6_addr_1[16];
 };
 
 I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
 
 /* Manage LAA Command (0x0106) - obsolete */
 struct i40e_aqc_mng_laa {
 	__le16	command_flags;
 #define I40E_AQ_LAA_FLAG_WR	0x8000
 	u8	reserved[2];
 	__le32	sal;
 	__le16	sah;
 	u8	reserved2[6];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
 
 /* Manage MAC Address Read Command (indirect 0x0107) */
 struct i40e_aqc_mac_address_read {
 	__le16	command_flags;
 #define I40E_AQC_LAN_ADDR_VALID		0x10
 #define I40E_AQC_SAN_ADDR_VALID		0x20
 #define I40E_AQC_PORT_ADDR_VALID	0x40
 #define I40E_AQC_WOL_ADDR_VALID		0x80
 #define I40E_AQC_MC_MAG_EN_VALID	0x100
 #define I40E_AQC_WOL_PRESERVE_STATUS	0x200
 #define I40E_AQC_ADDR_VALID_MASK	0x3F0
 	u8	reserved[6];
 	__le32	addr_high;
 	__le32	addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
 
 struct i40e_aqc_mac_address_read_data {
 	u8 pf_lan_mac[6];
 	u8 pf_san_mac[6];
 	u8 port_mac[6];
 	u8 pf_wol_mac[6];
 };
 
 I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
 
 /* Manage MAC Address Write Command (0x0108) */
 struct i40e_aqc_mac_address_write {
 	__le16	command_flags;
 #define I40E_AQC_MC_MAG_EN		0x0100
 #define I40E_AQC_WOL_PRESERVE_ON_PFR	0x0200
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
 #define I40E_AQC_WRITE_TYPE_PORT	0x8000
 #define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG	0xC000
 #define I40E_AQC_WRITE_TYPE_MASK	0xC000
 
 	__le16	mac_sah;
 	__le32	mac_sal;
 	u8	reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
 
 /* PXE commands (0x011x) */
 
 /* Clear PXE Command and response  (direct 0x0110) */
 struct i40e_aqc_clear_pxe {
 	u8	rx_cnt;
 	u8	reserved[15];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 #define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 #define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 #define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 __le16 valid_flags; #define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 #define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 u8 reserved[2]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); struct i40e_aqc_set_wol_filter_data { u8 filter[128]; u8 mask[16]; }; I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); /* Get Wake Reason (0x0121) */ struct i40e_aqc_get_wake_reason_completion { u8 reserved_1[2]; __le16 wake_reason; #define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 #define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) #define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 #define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) u8 reserved_2[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); /* Switch configuration commands (0x02xx) */ /* Used by many indirect commands that only pass an seid and a buffer in the * command */ struct i40e_aqc_switch_seid { __le16 seid; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); /* Get Switch Configuration command (indirect 0x0200) * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_switch_config_header_resp { __le16 num_reported; __le16 num_total; u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); struct i40e_aqc_switch_config_element_resp { u8 element_type; #define I40E_AQ_SW_ELEM_TYPE_MAC 1 #define I40E_AQ_SW_ELEM_TYPE_PF 2 #define I40E_AQ_SW_ELEM_TYPE_VF 3 #define I40E_AQ_SW_ELEM_TYPE_EMP 4 #define I40E_AQ_SW_ELEM_TYPE_BMC 5 #define I40E_AQ_SW_ELEM_TYPE_PV 16 #define I40E_AQ_SW_ELEM_TYPE_VEB 17 #define I40E_AQ_SW_ELEM_TYPE_PA 18 #define I40E_AQ_SW_ELEM_TYPE_VSI 19 u8 revision; #define I40E_AQ_SW_ELEM_REV_1 1 __le16 seid; __le16 uplink_seid; __le16 downlink_seid; u8 reserved[3]; u8 connection_type; #define I40E_AQ_CONN_TYPE_REGULAR 0x1 #define I40E_AQ_CONN_TYPE_DEFAULT 0x2 #define I40E_AQ_CONN_TYPE_CASCADED 0x3 __le16 scheduler_id; __le16 element_info; }; I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); /* Get Switch Configuration (indirect 0x0200) * an array of elements are returned in the response buffer * the first in the array is the header, remainder are elements */ struct i40e_aqc_get_switch_config_resp { struct i40e_aqc_get_switch_config_header_resp header; struct i40e_aqc_switch_config_element_resp element[1]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); /* Add Statistics (direct 0x0201) * Remove Statistics (direct 0x0202) */ struct i40e_aqc_add_remove_statistics { __le16 seid; __le16 vlan; __le16 stat_index; u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { __le16 command_flags; #define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 __le16 bad_frame_vsi; #define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 #define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF __le16 default_seid; /* reserved for command */ u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); /* Get Switch Resource Allocation (indirect 0x0204) */ struct i40e_aqc_get_switch_resource_alloc { u8 num_entries; /* reserved for command */ u8 reserved[7]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { u8 resource_type; #define I40E_AQ_RESOURCE_TYPE_VEB 0x0 #define I40E_AQ_RESOURCE_TYPE_VSI 0x1 #define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 #define I40E_AQ_RESOURCE_TYPE_STAG 0x3 #define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 #define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 #define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 #define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 #define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 #define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 #define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA #define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB #define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC #define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD #define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF #define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 #define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 #define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 #define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 u8 reserved1; __le16 guaranteed; __le16 total; __le16 used; __le16 total_unalloced; u8 reserved2[6]; }; I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); /* Set Switch Configuration (direct 0x0205) */ struct i40e_aqc_set_switch_config { __le16 flags; /* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 #define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004 __le16 valid_flags; /* The ethertype in switch_tag is dropped on ingress and used * internally by the switch. Set this to zero for the default * of 0x88a8 (802.1ad). Should be zero for firmware API * versions lower than 1.7. */ __le16 switch_tag; /* The ethertypes in first_tag and second_tag are used to * match the outer and inner VLAN tags (respectively) when HW * double VLAN tagging is enabled via the set port parameters * AQ command. Otherwise these are both ignored. Set them to * zero for their defaults of 0x8100 (802.1Q). Should be zero * for firmware API versions lower than 1.7. 
*/ __le16 first_tag; __le16 second_tag; /* Next byte is split into following: * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0 * Bit 6 : 0 : Destination Port, 1: source port * Bit 5..4 : L4 type * 0: rsvd * 1: TCP * 2: UDP * 3: Both TCP and UDP * Bits 3:0 Mode * 0: default mode * 1: L4 port only mode * 2: non-tunneled mode * 3: tunneled mode */ #define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80 #define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40 #define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00 #define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10 #define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20 #define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30 #define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00 #define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01 #define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02 #define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03 u8 mode; u8 rsvd5[5]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); /* Read Receive control registers (direct 0x0206) * Write Receive control registers (direct 0x0207) * used for accessing Rx control registers that can be * slow and need special handling when under high Rx load */ struct i40e_aqc_rx_ctl_reg_read_write { __le32 reserved1; __le32 address; __le32 reserved2; __le32 value; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) * * Update VSI (indirect 0x211) * uses the same data structure as Add VSI * * Get VSI (indirect 0x0212) * uses the same completion and data structure as Add VSI */ struct i40e_aqc_add_get_update_vsi { __le16 uplink_seid; u8 connection_type; #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 #define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 #define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 u8 reserved1; u8 vf_id; u8 reserved2; __le16 vsi_flags; #define I40E_AQ_VSI_TYPE_SHIFT 0x0 #define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) #define I40E_AQ_VSI_TYPE_VF 0x0 #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 #define I40E_AQ_VSI_TYPE_PF 0x2 #define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 #define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); struct i40e_aqc_add_get_update_vsi_completion { __le16 seid; __le16 vsi_number; __le16 vsi_used; __le16 vsi_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); struct i40e_aqc_vsi_properties_data { /* first 96 byte are written by SW */ __le16 valid_sections; #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 #define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 #define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 #define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 #define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ __le16 switch_id; /* 12bit id combined with flags below */ #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) #define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 u8 sw_reserved[2]; /* security section */ u8 sec_flags; #define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 u8 sec_reserved; /* VLAN section */ __le16 pvid; /* VLANS include 
priority bits */ __le16 fcoe_pvid; u8 port_vlan_flags; #define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ I40E_AQ_VSI_PVLAN_MODE_SHIFT) #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 #define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ I40E_AQ_VSI_PVLAN_EMOD_SHIFT) #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 #define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 u8 pvlan_reserved[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ #define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 #define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 #define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 #define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 #define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 #define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 #define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 #define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) #define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 #define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ __le16 cas_pv_tag; u8 cas_pv_flags; #define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 #define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) #define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 #define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 #define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 #define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 #define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 #define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 u8 cas_pv_reserved; /* queue mapping section */ __le16 mapping_flags; #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 __le16 queue_mapping[16]; #define I40E_AQ_VSI_QUEUE_SHIFT 0x0 #define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) __le16 tc_mapping[8]; #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 #define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 #define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; #define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 #define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 #define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 #define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ u8 up_enable_bits; u8 sched_reserved; /* outer up section */ __le32 outer_up_table; /* same structure and defines as ingress tbl */ u8 cmd_reserved[8]; /* last 32 bytes are written by FW */ __le16 qs_handle[8]; #define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF __le16 stat_counter_idx; __le16 sched_id; u8 resp_reserved[12]; }; I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); /* Add Port Virtualizer 
(direct 0x0220) * also used for update PV (direct 0x0221) but only flags are used * (IS_CTRL_PORT only works on add PV) */ struct i40e_aqc_add_update_pv { __le16 command_flags; #define I40E_AQC_PV_FLAG_PV_TYPE 0x1 #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 #define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 #define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 __le16 uplink_seid; __le16 connected_seid; u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ __le16 pv_seid; #define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 #define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 #define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 #define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); /* Get PV Params (direct 0x0222) * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_pv_params_completion { __le16 seid; __le16 default_stag; __le16 pv_flags; /* same flags as add_pv */ #define I40E_AQC_GET_PV_PV_TYPE 0x1 #define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 #define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 u8 reserved[8]; __le16 default_port_seid; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); /* Add VEB (direct 0x0230) */ struct i40e_aqc_add_veb { __le16 uplink_seid; __le16 downlink_seid; __le16 veb_flags; #define I40E_AQC_ADD_VEB_FLOATING 0x1 #define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 #define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 #define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ #define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); struct i40e_aqc_add_veb_completion { u8 reserved[6]; __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ __le16 veb_seid; #define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 #define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 #define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 #define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 __le16 statistic_index; __le16 vebs_used; __le16 vebs_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); /* Get VEB Parameters (direct 0x0232) * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_veb_parameters_completion { __le16 seid; __le16 switch_id; __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ __le16 statistic_index; __le16 vebs_used; __le16 vebs_free; u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); /* Delete Element (direct 0x0243) * uses the generic i40e_aqc_switch_seid */ /* Add MAC-VLAN (indirect 0x0250) */ /* used for the command for most vlan commands */ struct i40e_aqc_macvlan { __le16 num_addresses; __le16 seid[3]; #define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); /* indirect data for command and response */ struct i40e_aqc_add_macvlan_element_data { u8 mac_addr[6]; __le16 vlan_tag; __le16 flags; #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 #define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; 
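/* The queue_number field above is consulted by firmware only when the
 * I40E_AQC_MACVLAN_ADD_TO_QUEUE flag is set in 'flags'. A minimal fill-in
 * sketch for one element ('e', 'mac', 'vlan' and 'queue' are hypothetical
 * caller-owned variables; CPU_TO_LE16 is the shared-code byte-order helper
 * from i40e_osdep.h):
 *
 *	struct i40e_aqc_add_macvlan_element_data e = {0};
 *
 *	memcpy(e.mac_addr, mac, sizeof(e.mac_addr));
 *	e.vlan_tag = CPU_TO_LE16(vlan);
 *	e.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
 *	    I40E_AQC_MACVLAN_ADD_TO_QUEUE);
 *	e.queue_number = CPU_TO_LE16(queue & I40E_AQC_MACVLAN_CMD_QUEUE_MASK);
 */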
#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT) /* response section */ u8 match_method; #define I40E_AQC_MM_PERFECT_MATCH 0x01 #define I40E_AQC_MM_HASH_MATCH 0x02 #define I40E_AQC_MM_ERR_NO_RES 0xFF u8 reserved1[3]; }; struct i40e_aqc_add_remove_macvlan_completion { __le16 perfect_mac_used; __le16 perfect_mac_free; __le16 unicast_hash_free; __le16 multicast_hash_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); /* Remove MAC-VLAN (indirect 0x0251) * uses i40e_aqc_macvlan for the descriptor * data points to an array of num_addresses of elements */ struct i40e_aqc_remove_macvlan_element_data { u8 mac_addr[6]; __le16 vlan_tag; u8 flags; #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 #define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 #define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 u8 reserved[3]; /* reply section */ u8 error_code; #define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 #define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF u8 reply_reserved[3]; }; /* Add VLAN (indirect 0x0252) * Remove VLAN (indirect 0x0253) * use the generic i40e_aqc_macvlan for the command */ struct i40e_aqc_add_remove_vlan_element_data { __le16 vlan_tag; u8 vlan_flags; /* flags for add VLAN */ #define I40E_AQC_ADD_VLAN_LOCAL 0x1 #define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 #define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) #define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 #define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 #define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 #define I40E_AQC_VLAN_PTYPE_SHIFT 3 #define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) #define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 #define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 #define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 #define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 /* flags for remove VLAN */ #define I40E_AQC_REMOVE_VLAN_ALL 0x1 u8 reserved; u8 result; /* flags for add VLAN */ #define I40E_AQC_ADD_VLAN_SUCCESS 0x0 #define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE #define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF /* flags for remove VLAN */ #define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 #define I40E_AQC_REMOVE_VLAN_FAIL 0xFF u8 reserved1[3]; }; struct i40e_aqc_add_remove_vlan_completion { u8 reserved[4]; __le16 vlans_used; __le16 vlans_free; __le32 addr_high; __le32 addr_low; }; /* Set VSI Promiscuous Modes (direct 0x0254) */ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 promiscuous_flags; __le16 valid_flags; /* flags used for both fields above */ #define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 #define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 #define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; #define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); /* Add S/E-tag command (direct 0x0255) * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_add_tag { __le16 flags; #define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 __le16 seid; #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; __le16 queue_number; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); struct
i40e_aqc_add_remove_tag_completion { u8 reserved[12]; __le16 tags_used; __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); /* Remove S/E-tag command (direct 0x0256) * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_remove_tag { __le16 seid; #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); /* Add multicast E-Tag (direct 0x0257) * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields * and no external data */ struct i40e_aqc_add_remove_mcast_etag { __le16 pv_seid; __le16 etag; u8 num_unicast_etags; u8 reserved[3]; __le32 addr_high; /* address of array of 2-byte s-tags */ __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); struct i40e_aqc_add_remove_mcast_etag_completion { u8 reserved[4]; __le16 mcast_etags_used; __le16 mcast_etags_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { __le16 seid; #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) __le16 old_tag; __le16 new_tag; u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); struct i40e_aqc_update_tag_completion { u8 reserved[12]; __le16 tags_used; __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); /* Add Control Packet filter (direct 0x025A) * Remove Control Packet filter (direct 0x025B) * uses the i40e_aqc_add_oveb_cloud, * and the generic direct completion structure */ struct i40e_aqc_add_remove_control_packet_filter { u8 mac[6]; __le16 etype; __le16 flags; #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 __le16 seid; #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) __le16 queue; u8 reserved[2]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); struct i40e_aqc_add_remove_control_packet_filter_completion { __le16 mac_etype_used; __le16 etype_used; __le16 mac_etype_free; __le16 etype_free; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); /* Add Cloud filters (indirect 0x025C) * Remove Cloud filters (indirect 0x025D) * uses the i40e_aqc_add_remove_cloud_filters, * and the generic indirect completion structure */ struct i40e_aqc_add_remove_cloud_filters { u8 num_filters; u8 reserved; __le16 seid; #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) u8 reserved2[4]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); struct i40e_aqc_add_remove_cloud_filters_element_data { u8 outer_mac[6]; u8 inner_mac[6]; __le16 inner_vlan; union { struct { u8 reserved[12]; u8 data[4]; } v4; struct { u8 data[16]; } v6; } ipaddr; __le16 flags; #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ #define 
I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 /* 0x0002 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 /* 0x0005 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 /* 0x0007 reserved */ /* 0x0008 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 #define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; __le16 queue_number; #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 #define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) u8 reserved2[14]; /* response section */ u8 allocation_result; #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 #define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF u8 response_reserved[7]; }; struct i40e_aqc_remove_cloud_filters_completion { __le16 perfect_ovlan_used; __le16 perfect_ovlan_free; __le16 vlan_used; __le16 vlan_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); /* Add Mirror Rule (indirect or direct 0x0260) * Delete Mirror Rule (indirect or direct 0x0261) * note: some rule types (4,5) do not use an external buffer. * take care to set the flags correctly. 
*/ struct i40e_aqc_add_delete_mirror_rule { __le16 seid; __le16 rule_type; #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 #define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 __le16 num_entries; __le16 destination; /* VSI for add, rule id for delete */ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); struct i40e_aqc_add_delete_mirror_rule_completion { u8 reserved[2]; __le16 rule_id; /* only used on add */ __le16 mirror_rules_used; __le16 mirror_rules_free; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); /* DCB 0x03xx*/ /* PFC Ignore (direct 0x0301) * the command and response use the same descriptor structure */ struct i40e_aqc_pfc_ignore { u8 tc_bitmap; u8 command_flags; /* unused on response */ #define I40E_AQC_PFC_IGNORE_SET 0x80 #define I40E_AQC_PFC_IGNORE_CLEAR 0x0 u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); /* DCB Update (direct 0x0302) uses the i40e_aq_desc structure * with no parameters */ /* TX scheduler 0x04xx */ /* Almost all the indirect commands use * this generic struct to pass the SEID in param0 */ struct i40e_aqc_tx_sched_ind { __le16 vsi_seid; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); /* Several commands respond with a set of queue set handles */ struct i40e_aqc_qs_handles_resp { __le16 qs_handles[8]; }; /* Configure VSI BW limits (direct 0x0400) */ struct i40e_aqc_configure_vsi_bw_limit { __le16 vsi_seid; u8 reserved[2]; __le16 credit; u8 reserved1[2]; u8 max_credit; /* 0-3, limit = 2^max */ u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); /* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_ets_sla_bw_data { u8 tc_valid_bits; u8 reserved[15]; __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ __le16 tc_bw_max[2]; u8 reserved1[28]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data); /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_tc_bw_data { u8 tc_valid_bits; u8 reserved[3]; u8 tc_bw_credits[8]; u8 reserved1[4]; __le16 qs_handles[8]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); /* Query vsi bw configuration (indirect 0x0408) */ struct i40e_aqc_query_vsi_bw_config_resp { u8 tc_valid_bits; u8 tc_suspended_bits; u8 reserved[14]; __le16 qs_handles[8]; u8 reserved1[4]; __le16 port_bw_limit; u8 reserved2[2]; u8 max_bw; /* 0-3, limit = 2^max */ u8 reserved3[23]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ struct i40e_aqc_query_vsi_ets_sla_config_resp { u8 tc_valid_bits; u8 reserved[3]; u8 share_credits[8]; __le16 credits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ __le16 tc_bw_max[2]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ struct 
i40e_aqc_configure_switching_comp_bw_limit { __le16 seid; u8 reserved[2]; __le16 credit; u8 reserved1[2]; u8 max_bw; /* 0-3, limit = 2^max */ u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); /* Enable Physical Port ETS (indirect 0x0413) * Modify Physical Port ETS (indirect 0x0414) * Disable Physical Port ETS (indirect 0x0415) */ struct i40e_aqc_configure_switching_comp_ets_data { u8 reserved[4]; u8 tc_valid_bits; u8 seepage; #define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 u8 tc_strict_priority_flags; u8 reserved1[17]; u8 tc_bw_share_credits[8]; u8 reserved2[96]; }; I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { u8 tc_valid_bits; u8 reserved[15]; __le16 tc_bw_credit[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ __le16 tc_bw_max[2]; u8 reserved1[28]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data); /* Configure Switching Component Bandwidth Allocation per Tc * (indirect 0x0417) */ struct i40e_aqc_configure_switching_comp_bw_config_data { u8 tc_valid_bits; u8 reserved[2]; u8 absolute_credits; /* bool */ u8 tc_bw_share_credits[8]; u8 reserved1[20]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); /* Query Switching Component Configuration (indirect 0x0418) */ struct i40e_aqc_query_switching_comp_ets_config_resp { u8 tc_valid_bits; u8 reserved[35]; __le16 port_bw_limit; u8 reserved1[2]; u8 tc_bw_max; /* 0-3, limit = 2^max */ u8 reserved2[23]; }; I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ struct i40e_aqc_query_port_ets_config_resp { u8 reserved[4]; u8 tc_valid_bits; u8 reserved1; u8 tc_strict_priority_bits; u8 reserved2; u8 tc_bw_share_credits[8]; __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ __le16 tc_bw_max[2]; u8 reserved3[32]; }; I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); /* Query Switching Component Bandwidth Allocation per Traffic Type * (indirect 0x041A) */ struct i40e_aqc_query_switching_comp_bw_config_resp { u8 tc_valid_bits; u8 reserved[2]; u8 absolute_credits_enable; /* bool */ u8 tc_bw_share_credits[8]; __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ __le16 tc_bw_max[2]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); /* Suspend/resume port TX traffic * (direct 0x041B and 0x041C) uses the generic SEID struct */ /* Configure partition BW * (indirect 0x041D) */ struct i40e_aqc_configure_partition_bw_data { __le16 pf_valid_bits; u8 min_bw[16]; /* guaranteed bandwidth */ u8 max_bw[16]; /* bandwidth limit */ }; I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); /* Get and set the active HMC resource profile and status. 
* (direct 0x0500) and (direct 0x0501) */ struct i40e_aq_get_set_hmc_resource_profile { u8 pm_profile; u8 pe_vf_enabled; u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); enum i40e_aq_hmc_profile { /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ I40E_HMC_PROFILE_DEFAULT = 1, I40E_HMC_PROFILE_FAVOR_VF = 2, I40E_HMC_PROFILE_EQUAL = 3, }; /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ #define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 #define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 enum i40e_aq_phy_type { I40E_PHY_TYPE_SGMII = 0x0, I40E_PHY_TYPE_1000BASE_KX = 0x1, I40E_PHY_TYPE_10GBASE_KX4 = 0x2, I40E_PHY_TYPE_10GBASE_KR = 0x3, I40E_PHY_TYPE_40GBASE_KR4 = 0x4, I40E_PHY_TYPE_XAUI = 0x5, I40E_PHY_TYPE_XFI = 0x6, I40E_PHY_TYPE_SFI = 0x7, I40E_PHY_TYPE_XLAUI = 0x8, I40E_PHY_TYPE_XLPPI = 0x9, I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, I40E_PHY_TYPE_10GBASE_AOC = 0xC, I40E_PHY_TYPE_40GBASE_AOC = 0xD, I40E_PHY_TYPE_UNRECOGNIZED = 0xE, I40E_PHY_TYPE_UNSUPPORTED = 0xF, I40E_PHY_TYPE_100BASE_TX = 0x11, I40E_PHY_TYPE_1000BASE_T = 0x12, I40E_PHY_TYPE_10GBASE_T = 0x13, I40E_PHY_TYPE_10GBASE_SR = 0x14, I40E_PHY_TYPE_10GBASE_LR = 0x15, I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, I40E_PHY_TYPE_10GBASE_CR1 = 0x17, I40E_PHY_TYPE_40GBASE_CR4 = 0x18, I40E_PHY_TYPE_40GBASE_SR4 = 0x19, I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, I40E_PHY_TYPE_1000BASE_SX = 0x1B, I40E_PHY_TYPE_1000BASE_LX = 0x1C, I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, I40E_PHY_TYPE_25GBASE_KR = 0x1F, I40E_PHY_TYPE_25GBASE_CR = 0x20, I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, I40E_PHY_TYPE_25GBASE_AOC = 0x23, I40E_PHY_TYPE_25GBASE_ACC = 0x24, + I40E_PHY_TYPE_2_5GBASE_T = 0x30, + I40E_PHY_TYPE_5GBASE_T = 0x31, I40E_PHY_TYPE_MAX, I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, I40E_PHY_TYPE_EMPTY = 0xFE, I40E_PHY_TYPE_DEFAULT = 0xFF, }; #define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \ BIT_ULL(I40E_PHY_TYPE_XAUI) | \ BIT_ULL(I40E_PHY_TYPE_XFI) | \ BIT_ULL(I40E_PHY_TYPE_SFI) | \ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \ - BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC)) + BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \ + BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \ + BIT_ULL(I40E_PHY_TYPE_5GBASE_T)) +#define 
I40E_LINK_SPEED_2_5GB_SHIFT 0x0 #define I40E_LINK_SPEED_100MB_SHIFT 0x1 #define I40E_LINK_SPEED_1000MB_SHIFT 0x2 #define I40E_LINK_SPEED_10GB_SHIFT 0x3 #define I40E_LINK_SPEED_40GB_SHIFT 0x4 #define I40E_LINK_SPEED_20GB_SHIFT 0x5 #define I40E_LINK_SPEED_25GB_SHIFT 0x6 +#define I40E_LINK_SPEED_5GB_SHIFT 0x7 enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT), + I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT), I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT), I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT), }; struct i40e_aqc_module_desc { u8 oui[3]; u8 reserved1; u8 part_number[16]; u8 revision[4]; u8 reserved2[8]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); struct i40e_aq_get_phy_abilities_resp { __le32 phy_type; /* bitmap using the above enum for offsets */ u8 link_speed; /* bitmap using the above enum bit patterns */ u8 abilities; #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 #define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 #define I40E_AQ_PHY_LINK_ENABLED 0x08 #define I40E_AQ_PHY_AN_ENABLED 0x10 #define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 #define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 #define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 __le16 eee_capability; +#define I40E_AQ_EEE_AUTO 0x0001 #define I40E_AQ_EEE_100BASE_TX 0x0002 #define I40E_AQ_EEE_1000BASE_T 0x0004 #define I40E_AQ_EEE_10GBASE_T 0x0008 #define I40E_AQ_EEE_1000BASE_KX 0x0010 #define I40E_AQ_EEE_10GBASE_KX4 0x0020 #define I40E_AQ_EEE_10GBASE_KR 0x0040 +#define I40E_AQ_EEE_2_5GBASE_T 0x0100 +#define I40E_AQ_EEE_5GBASE_T 0x0200 __le32 eeer_val; u8 d3_lpan; #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 u8 phy_type_ext; #define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01 #define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 #define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 #define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 +#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40 +#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80 u8 fec_cfg_curr_mod_ext_info; #define I40E_AQ_ENABLE_FEC_KR 0x01 #define I40E_AQ_ENABLE_FEC_RS 0x02 #define I40E_AQ_REQUEST_FEC_KR 0x04 #define I40E_AQ_REQUEST_FEC_RS 0x08 #define I40E_AQ_ENABLE_FEC_AUTO 0x10 #define I40E_AQ_FEC #define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 #define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 u8 ext_comp_code; u8 phy_id[4]; u8 module_type[3]; u8 qualified_module_count; #define I40E_AQ_PHY_MAX_QMS 16 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; }; I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); /* Set PHY Config (direct 0x0601) */ struct i40e_aq_set_phy_config { /* same bits as above in all */ __le32 phy_type; u8 link_speed; u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ #define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 __le16 eee_capability; __le32 eeer; u8 low_power_ctrl; u8 phy_type_ext; u8 fec_config; #define I40E_AQ_SET_FEC_ABILITY_KR BIT(0) #define I40E_AQ_SET_FEC_ABILITY_RS BIT(1) #define I40E_AQ_SET_FEC_REQUEST_KR BIT(2) #define I40E_AQ_SET_FEC_REQUEST_RS BIT(3) #define I40E_AQ_SET_FEC_AUTO BIT(4) #define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0 #define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << 
I40E_AQ_PHY_FEC_CONFIG_SHIFT) u8 reserved; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); /* Set MAC Config command data structure (direct 0x0603) */ struct i40e_aq_set_mac_config { __le16 max_frame_size; u8 params; #define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 #define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 #define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 #define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 #define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 #define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 #define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 #define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 #define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 #define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 #define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 u8 tx_timer_priority; /* bitmap */ __le16 tx_timer_value; __le16 fc_refresh_threshold; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); /* Restart Auto-Negotiation (direct 0x605) */ struct i40e_aqc_set_link_restart_an { u8 command; #define I40E_AQ_PHY_RESTART_AN 0x02 #define I40E_AQ_PHY_LINK_ENABLE 0x04 u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { __le16 command_flags; /* only field set on command */ #define I40E_AQ_LSE_MASK 0x3 #define I40E_AQ_LSE_NOP 0x0 #define I40E_AQ_LSE_DISABLE 0x2 #define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ #define I40E_AQ_LSE_IS_ENABLED 0x1 u8 phy_type; /* i40e_aq_phy_type */ u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; #define I40E_AQ_LINK_UP 0x01 /* obsolete */ #define I40E_AQ_LINK_UP_FUNCTION 0x01 #define I40E_AQ_LINK_FAULT 0x02 #define I40E_AQ_LINK_FAULT_TX 0x04 #define I40E_AQ_LINK_FAULT_RX 0x08 #define I40E_AQ_LINK_FAULT_REMOTE 0x10 #define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 #define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; #define I40E_AQ_AN_COMPLETED 0x01 #define I40E_AQ_LP_AN_ABILITY 0x02 #define I40E_AQ_PD_FAULT 0x04 #define I40E_AQ_FEC_EN 0x08 #define I40E_AQ_PHY_LOW_POWER 0x10 #define I40E_AQ_LINK_PAUSE_TX 0x20 #define I40E_AQ_LINK_PAUSE_RX 0x40 #define I40E_AQ_QUALIFIED_MODULE 0x80 u8 ext_info; #define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 #define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 #define I40E_AQ_LINK_TX_SHIFT 0x02 #define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) #define I40E_AQ_LINK_TX_ACTIVE 0x00 #define I40E_AQ_LINK_TX_DRAINED 0x01 #define I40E_AQ_LINK_TX_FLUSHED 0x03 #define I40E_AQ_LINK_FORCED_40G 0x10 /* 25G Error Codes */ #define I40E_AQ_25G_NO_ERR 0X00 #define I40E_AQ_25G_NOT_PRESENT 0X01 #define I40E_AQ_25G_NVM_CRC_ERR 0X02 #define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 #define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 #define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ /* Since firmware API 1.7 loopback field keeps power class info as well */ #define I40E_AQ_LOOPBACK_MASK 0x07 #define I40E_AQ_PWR_CLASS_SHIFT_LB 6 #define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) __le16 max_frame_size; u8 config; #define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 #define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 union { struct { u8 power_desc; #define I40E_AQ_LINK_POWER_CLASS_1 0x00 #define I40E_AQ_LINK_POWER_CLASS_2 0x01 #define I40E_AQ_LINK_POWER_CLASS_3 0x02 
#define I40E_AQ_LINK_POWER_CLASS_4 0x03 #define I40E_AQ_PWR_CLASS_MASK 0x03 u8 reserved[4]; }; struct { u8 link_type[4]; u8 link_type_ext; }; }; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); /* Set event mask command (direct 0x613) */ struct i40e_aqc_set_phy_int_mask { u8 reserved[8]; __le16 event_mask; #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 #define I40E_AQ_EVENT_MEDIA_NA 0x0004 #define I40E_AQ_EVENT_LINK_FAULT 0x0008 #define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 #define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 #define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 #define I40E_AQ_EVENT_AN_COMPLETED 0x0080 #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 #define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 u8 reserved1[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); /* Get Local AN advt register (direct 0x0614) * Set Local AN advt register (direct 0x0615) * Get Link Partner AN advt register (direct 0x0616) */ struct i40e_aqc_an_advt_reg { __le32 local_an_reg0; __le16 local_an_reg1; u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); /* Set Loopback mode (0x0618) */ struct i40e_aqc_set_lb_mode { u8 lb_level; #define I40E_AQ_LB_NONE 0 #define I40E_AQ_LB_MAC 1 #define I40E_AQ_LB_SERDES 2 #define I40E_AQ_LB_PHY_INT 3 #define I40E_AQ_LB_PHY_EXT 4 #define I40E_AQ_LB_CPVL_PCS 5 #define I40E_AQ_LB_CPVL_EXT 6 #define I40E_AQ_LB_PHY_LOCAL 0x01 #define I40E_AQ_LB_PHY_REMOTE 0x02 #define I40E_AQ_LB_MAC_LOCAL 0x04 u8 lb_type; #define I40E_AQ_LB_LOCAL 0 #define I40E_AQ_LB_FAR 0x01 u8 speed; #define I40E_AQ_LB_SPEED_NONE 0 #define I40E_AQ_LB_SPEED_1G 1 #define I40E_AQ_LB_SPEED_10G 2 #define I40E_AQ_LB_SPEED_40G 3 #define I40E_AQ_LB_SPEED_20G 4 u8 force_speed; u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) */ struct i40e_aqc_set_phy_debug { u8 command_flags; #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 /* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 /* Disable link manageability on all ports needs both bits 4 and 5 */ #define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20 u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_INTERNAL = 0x1, I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; +#pragma pack(1) /* Run PHY Activity (0x0626) */ struct i40e_aqc_run_phy_activity { - __le16 activity_id; - u8 flags; - u8 reserved1; - __le32 control; - __le32 data; - u8 reserved2[4]; + u8 cmd_flags; + __le16 activity_id; +#define I40E_AQ_RUN_PHY_ACTIVITY_ACTIVITY_ID_USER_DEFINED 0x10 + u8 reserved; + union { + struct { + __le32 dnl_opcode; +#define I40E_AQ_RUN_PHY_ACTIVITY_DNL_OPCODE_GET_EEE_STATISTICS 0x801b + __le32 data; + u8 reserved2[4]; + } cmd; + struct { + __le32 cmd_status; +#define I40E_AQ_RUN_PHY_ACTIVITY_CMD_STATUS_SUCCESS 0x4 + __le32 data0; + __le32 data1; + } resp; + } params; }; +#pragma pack() I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); /* Set PHY Register command (0x0628) */ /* Get PHY Register command (0x0629) */ struct i40e_aqc_phy_register_access { u8 phy_interface; #define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 u8 dev_addres; 
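/* The dev_addres field above selects the target device behind the chosen
 * phy_interface: typically an MDIO (IEEE 802.3 clause 45) device address
 * when an external BASE-T PHY is addressed, or the module's I2C address
 * when phy_interface selects the external module. A sketch of preparing a
 * register read, assuming 'cmd' points at the descriptor parameter area
 * and 'reg' holds a clause-45 register number (both hypothetical; 0x07 is
 * the clause-45 auto-negotiation device, shown only as an illustration):
 *
 *	cmd->phy_interface = I40E_AQ_PHY_REG_ACCESS_EXTERNAL;
 *	cmd->dev_addres = 0x07;
 *	cmd->reg_address = CPU_TO_LE32(reg);
 */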
- u8 reserved1[2]; + u8 cmd_flags; +#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 1 + u8 reserved1; __le32 reg_address; __le32 reg_value; u8 reserved2[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) */ struct i40e_aqc_nvm_update { u8 command_flags; #define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 +#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 #define I40E_AQ_NVM_FLASH_ONLY 0x80 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 #define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 u8 module_pointer; __le16 length; __le32 offset; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { __le16 cmd_flags; #define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 #define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 #define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 __le16 element_count; __le16 element_id; /* Feature/field ID */ __le16 element_id_msw; /* MSWord of field ID */ __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); /* NVM Config Write (indirect 0x0705) */ struct i40e_aqc_nvm_config_write { __le16 cmd_flags; __le16 element_count; u8 reserved[4]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) #define I40E_AQ_ANVM_FEATURE 0 #define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 #define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 #define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 __le16 feature_options; __le16 feature_selection; }; I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); struct i40e_aqc_nvm_config_data_immediate_field { __le32 field_id; __le32 field_value; __le16 field_options; __le16 reserved; }; I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); /* OEM Post Update (indirect 0x0720) * no command data struct used */ struct i40e_aqc_nvm_oem_post_update { #define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 u8 sel_data; u8 reserved[7]; }; I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); struct i40e_aqc_nvm_oem_post_update_buffer { u8 str_len; u8 dev_addr; __le16 eeprom_addr; u8 data[36]; }; I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); /* Thermal Sensor (indirect 0x0721) * read or set thermal sensor configs and values * takes a sensor and command specific data buffer, not detailed here */ struct i40e_aqc_thermal_sensor { u8 sensor_action; #define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 #define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 #define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 u8 reserved[7]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) */ struct i40e_aqc_pf_vf_message { __le32 id; u8 reserved[4]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); /*
Alternate structure */ /* Direct write (direct 0x0900) * Direct read (direct 0x0902) */ struct i40e_aqc_alternate_write { __le32 address0; __le32 data0; __le32 address1; __le32 data1; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); /* Indirect write (indirect 0x0901) * Indirect read (indirect 0x0903) */ struct i40e_aqc_alternate_ind_write { __le32 address; __le32 length; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); /* Done alternate write (direct 0x0904) * uses i40e_aq_desc */ struct i40e_aqc_alternate_write_done { __le16 cmd_flags; #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { __le32 mode; #define I40E_AQ_ALTERNATE_MODE_NONE 0 #define I40E_AQ_ALTERNATE_MODE_OEM 1 u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); /* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ /* async events 0x10xx */ /* Lan Queue Overflow Event (direct, 0x1001) */ struct i40e_aqc_lan_overflow { __le32 prtdcb_rupto; __le32 otx_ctl; u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); /* Get LLDP MIB (indirect 0x0A00) */ struct i40e_aqc_lldp_get_mib { u8 type; u8 reserved1; #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 #define I40E_AQ_LLDP_MIB_LOCAL 0x0 #define I40E_AQ_LLDP_MIB_REMOTE 0x1 #define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 #define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 #define I40E_AQ_LLDP_TX_SHIFT 0x4 #define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ __le16 local_len; __le16 remote_len; u8 reserved2[2]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); /* Configure LLDP MIB Change Event (direct 0x0A01) * also used for the event (with type in the command field) */ struct i40e_aqc_lldp_update_mib { u8 command; #define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 u8 reserved[7]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); /* Add LLDP TLV (indirect 0x0A02) * Delete LLDP TLV (indirect 0x0A04) */ struct i40e_aqc_lldp_add_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ u8 reserved1[1]; __le16 len; u8 reserved2[4]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); /* Update LLDP TLV (indirect 0x0A03) */ struct i40e_aqc_lldp_update_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ u8 reserved; __le16 old_len; __le16 new_offset; __le16 new_len; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 +#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2 u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); /* Start LLDP (direct 0x0A06) */ - struct i40e_aqc_lldp_start { u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 +#define I40E_AQ_LLDP_AGENT_START 0x1 +#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2 u8 reserved[15]; }; 
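/* With the _PERSIST flags added above, a start request that survives a
 * power cycle can be formed by OR-ing the persist bit into 'command' (on
 * firmware that reports persistent-LLDP support). A minimal sketch,
 * assuming the usual direct-command flow from i40e_common.c, with error
 * handling and the wrapping function omitted:
 *
 *	struct i40e_aq_desc desc;
 *	struct i40e_aqc_lldp_start *cmd =
 *	    (struct i40e_aqc_lldp_start *)&desc.params.raw;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
 *	cmd->command = I40E_AQ_LLDP_AGENT_START |
 *	    I40E_AQ_LLDP_AGENT_START_PERSIST;
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */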
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); /* Set DCB (direct 0x0303) */ struct i40e_aqc_set_dcb_parameters { u8 command; #define I40E_AQ_DCB_SET_AGENT 0x1 #define I40E_DCB_VALID 0x1 u8 valid_flags; u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); /* Get CEE DCBX Oper Config (0x0A07) * uses the generic descriptor struct * returns below as indirect response */ #define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 #define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) #define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 #define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) #define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 #define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) #define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 #define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) #define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 #define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) #define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) #define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) #define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB #define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) #define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 #define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) /* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with * word boundary layout issues, which the Linux compilers silently deal * with by adding padding, making the actual struct larger than designed. * However, the FW compiler for the NIC is less lenient and complains * about the struct. Hence, the struct defined here has an extra byte in * fields reserved3 and reserved4 to directly acknowledge that padding, * and the new length is used in the length check macro. */ struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved1; u8 oper_num_tc; u8 oper_prio_tc[4]; u8 reserved2; u8 oper_tc_bw[8]; u8 oper_pfc_en; u8 reserved3[2]; __le16 oper_app_prio; u8 reserved4[2]; __le16 tlv_status; }; I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp); struct i40e_aqc_get_cee_dcb_cfg_resp { u8 oper_num_tc; u8 oper_prio_tc[4]; u8 oper_tc_bw[8]; u8 oper_pfc_en; __le16 oper_app_prio; __le32 tlv_status; u8 reserved[12]; }; I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); /* Set Local LLDP MIB (indirect 0x0A08) * Used to replace the local MIB of a given LLDP agent. e.g. DCBx */ struct i40e_aqc_lldp_set_local_mib { #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 #define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; __le16 length; u8 reserved1[4]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); struct i40e_aqc_lldp_set_local_mib_resp { #define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01 u8 status; u8 reserved[15]; }; I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp); /* Stop/Start LLDP Agent (direct 0x0A09) * Used for stopping/starting specific LLDP agent. e.g. 
DCBx */ struct i40e_aqc_lldp_stop_start_specific_agent { #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); + +/* Restore LLDP Agent factory settings (direct 0x0A0A) */ +struct i40e_aqc_lldp_restore { + u8 command; +#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0 +#define I40E_AQ_LLDP_AGENT_RESTORE 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore); /* Add Udp Tunnel command and completion (direct 0x0B00) */ struct i40e_aqc_add_udp_tunnel { __le16 udp_port; u8 reserved0[3]; u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 #define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); struct i40e_aqc_add_udp_tunnel_completion { __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; #define I40E_AQC_SINGLE_PF 0x0 #define I40E_AQC_MULTIPLE_PFS 0x1 u8 total_filters; u8 reserved[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); /* remove UDP Tunnel command (0x0B01) */ struct i40e_aqc_remove_udp_tunnel { u8 reserved[2]; u8 index; /* 0 to 15 */ u8 reserved2[13]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); struct i40e_aqc_del_udp_tunnel_completion { __le16 udp_port; u8 index; /* 0 to 15 */ u8 multiple_pfs; u8 total_filters_used; u8 reserved1[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); struct i40e_aqc_get_set_rss_key { #define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) __le16 vsi_id; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); struct i40e_aqc_get_set_rss_key_data { u8 standard_rss_key[0x28]; u8 extended_hash_key[0xc]; }; I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); struct i40e_aqc_get_set_rss_lut { #define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 __le16 flags; u8 reserved[4]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); /* tunnel key structure 0x0B10 */ struct i40e_aqc_tunnel_key_structure { u8 key1_off; u8 key2_off; u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 /* response flags */ #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 #define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 #define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { __le32 param_type; #define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 #define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 #define I40E_AQ_OEM_PARAM_MAC 2 __le32 param_value1; __le16 
param_value2; u8 reserved[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { __le32 state; #define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 #define I40E_AQ_OEM_STATE_LINK_UP 0x1 u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); /* Initialize OCSD (0xFE02, direct) */ struct i40e_aqc_opc_oem_ocsd_initialize { u8 type_status; u8 reserved1[3]; __le32 ocsd_memory_block_addr_high; __le32 ocsd_memory_block_addr_low; __le32 requested_update_interval; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); /* Initialize OCBB (0xFE03, direct) */ struct i40e_aqc_opc_oem_ocbb_initialize { u8 type_status; u8 reserved1[3]; __le32 ocbb_memory_block_addr_high; __le32 ocbb_memory_block_addr_low; u8 reserved2[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); /* debug commands */ /* get device id (0xFF00) uses the generic structure */ /* set test mode (0xFF01, internal) */ struct i40e_acq_set_test_mode { u8 mode; #define I40E_AQ_TEST_PARTIAL 0 #define I40E_AQ_TEST_FULL 1 #define I40E_AQ_TEST_NVM 2 u8 reserved[3]; u8 command; #define I40E_AQ_TEST_OPEN 0 #define I40E_AQ_TEST_CLOSE 1 #define I40E_AQ_TEST_INC 2 u8 reserved2[3]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); /* Debug Read Register command (0xFF03) * Debug Write Register command (0xFF04) */ struct i40e_aqc_debug_reg_read_write { __le32 reserved; __le32 address; __le32 value_high; __le32 value_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); /* Scatter/gather Reg Read (indirect 0xFF05) * Scatter/gather Reg Write (indirect 0xFF06) */ /* i40e_aq_desc is used for the command */ struct i40e_aqc_debug_reg_sg_element_data { __le32 address; __le32 value; }; /* Debug Modify register (direct 0xFF07) */ struct i40e_aqc_debug_modify_reg { __le32 address; __le32 value; __le32 clear_mask; __le32 set_mask; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); /* dump internal data (0xFF08, indirect) */ #define I40E_AQ_CLUSTER_ID_AUX 0 #define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 #define I40E_AQ_CLUSTER_ID_TXSCHED 2 #define I40E_AQ_CLUSTER_ID_HMC 3 #define I40E_AQ_CLUSTER_ID_MAC0 4 #define I40E_AQ_CLUSTER_ID_MAC1 5 #define I40E_AQ_CLUSTER_ID_MAC2 6 #define I40E_AQ_CLUSTER_ID_MAC3 7 #define I40E_AQ_CLUSTER_ID_DCB 8 #define I40E_AQ_CLUSTER_ID_EMP_MEM 9 #define I40E_AQ_CLUSTER_ID_PKT_BUF 10 #define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { u8 cluster_id; u8 table_id; __le16 data_size; __le32 idx; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); struct i40e_aqc_debug_modify_internals { u8 cluster_id; u8 cluster_specific_params[7]; __le32 address_high; __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); #endif /* _I40E_ADMINQ_CMD_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_alloc.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_alloc.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_alloc.h (revision 349181) @@ -1,66 +1,66 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_ALLOC_H_ #define _I40E_ALLOC_H_ struct i40e_hw; /* Memory allocation types */ enum i40e_memory_type { i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ i40e_mem_asq_buf = 1, i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ i40e_mem_pd = 5, /* Page Descriptor */ i40e_mem_bp = 6, /* Backing Page - 4KB */ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ i40e_mem_reserved }; /* prototype for functions used for dynamic memory allocation */ enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, enum i40e_memory_type type, u64 size, u32 alignment); enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem); enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size); enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem); #endif /* _I40E_ALLOC_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_common.c =================================================================== --- releng/11.3/sys/dev/ixl/i40e_common.c (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_common.c (revision 349181) @@ -1,7182 +1,7371 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" #include "virtchnl.h" - /** * i40e_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. **/ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) { enum i40e_status_code status = I40E_SUCCESS; DEBUGFUNC("i40e_set_mac_type\n"); if (hw->vendor_id == I40E_INTEL_VENDOR_ID) { switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: + case I40E_DEV_ID_10G_B: + case I40E_DEV_ID_10G_SFP: case I40E_DEV_ID_20G_KR2: case I40E_DEV_ID_20G_KR2_A: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: + case I40E_DEV_ID_X710_N3000: + case I40E_DEV_ID_XXV710_N3000: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_KX_X722: case I40E_DEV_ID_QSFP_X722: case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: hw->mac.type = I40E_MAC_X722_VF; break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: case I40E_DEV_ID_ADAPTIVE_VF: hw->mac.type = I40E_MAC_VF; break; default: hw->mac.type = I40E_MAC_GENERIC; break; } } else { status = I40E_ERR_DEVICE_NOT_SUPPORTED; } DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n", hw->mac.type, status); return status; } /** * i40e_aq_str - convert AQ err code to a string * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) { switch (aq_err) { case I40E_AQ_RC_OK: return "OK"; case I40E_AQ_RC_EPERM: return "I40E_AQ_RC_EPERM"; case I40E_AQ_RC_ENOENT: return "I40E_AQ_RC_ENOENT"; case I40E_AQ_RC_ESRCH: return "I40E_AQ_RC_ESRCH"; case I40E_AQ_RC_EINTR: return "I40E_AQ_RC_EINTR"; case I40E_AQ_RC_EIO: return "I40E_AQ_RC_EIO"; case I40E_AQ_RC_ENXIO: return "I40E_AQ_RC_ENXIO"; case I40E_AQ_RC_E2BIG: return "I40E_AQ_RC_E2BIG"; case I40E_AQ_RC_EAGAIN: return "I40E_AQ_RC_EAGAIN"; case I40E_AQ_RC_ENOMEM: return "I40E_AQ_RC_ENOMEM"; case I40E_AQ_RC_EACCES: return "I40E_AQ_RC_EACCES"; case I40E_AQ_RC_EFAULT: return "I40E_AQ_RC_EFAULT"; case I40E_AQ_RC_EBUSY: return "I40E_AQ_RC_EBUSY"; case I40E_AQ_RC_EEXIST: return "I40E_AQ_RC_EEXIST"; case I40E_AQ_RC_EINVAL: return "I40E_AQ_RC_EINVAL"; case I40E_AQ_RC_ENOTTY: return "I40E_AQ_RC_ENOTTY"; case I40E_AQ_RC_ENOSPC: return "I40E_AQ_RC_ENOSPC"; 
case I40E_AQ_RC_ENOSYS: return "I40E_AQ_RC_ENOSYS"; case I40E_AQ_RC_ERANGE: return "I40E_AQ_RC_ERANGE"; case I40E_AQ_RC_EFLUSHED: return "I40E_AQ_RC_EFLUSHED"; case I40E_AQ_RC_BAD_ADDR: return "I40E_AQ_RC_BAD_ADDR"; case I40E_AQ_RC_EMODE: return "I40E_AQ_RC_EMODE"; case I40E_AQ_RC_EFBIG: return "I40E_AQ_RC_EFBIG"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); return hw->err_str; } /** * i40e_stat_str - convert status err code to a string * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err) { switch (stat_err) { case I40E_SUCCESS: return "OK"; case I40E_ERR_NVM: return "I40E_ERR_NVM"; case I40E_ERR_NVM_CHECKSUM: return "I40E_ERR_NVM_CHECKSUM"; case I40E_ERR_PHY: return "I40E_ERR_PHY"; case I40E_ERR_CONFIG: return "I40E_ERR_CONFIG"; case I40E_ERR_PARAM: return "I40E_ERR_PARAM"; case I40E_ERR_MAC_TYPE: return "I40E_ERR_MAC_TYPE"; case I40E_ERR_UNKNOWN_PHY: return "I40E_ERR_UNKNOWN_PHY"; case I40E_ERR_LINK_SETUP: return "I40E_ERR_LINK_SETUP"; case I40E_ERR_ADAPTER_STOPPED: return "I40E_ERR_ADAPTER_STOPPED"; case I40E_ERR_INVALID_MAC_ADDR: return "I40E_ERR_INVALID_MAC_ADDR"; case I40E_ERR_DEVICE_NOT_SUPPORTED: return "I40E_ERR_DEVICE_NOT_SUPPORTED"; case I40E_ERR_MASTER_REQUESTS_PENDING: return "I40E_ERR_MASTER_REQUESTS_PENDING"; case I40E_ERR_INVALID_LINK_SETTINGS: return "I40E_ERR_INVALID_LINK_SETTINGS"; case I40E_ERR_AUTONEG_NOT_COMPLETE: return "I40E_ERR_AUTONEG_NOT_COMPLETE"; case I40E_ERR_RESET_FAILED: return "I40E_ERR_RESET_FAILED"; case I40E_ERR_SWFW_SYNC: return "I40E_ERR_SWFW_SYNC"; case I40E_ERR_NO_AVAILABLE_VSI: return "I40E_ERR_NO_AVAILABLE_VSI"; case I40E_ERR_NO_MEMORY: return "I40E_ERR_NO_MEMORY"; case I40E_ERR_BAD_PTR: return "I40E_ERR_BAD_PTR"; case I40E_ERR_RING_FULL: return "I40E_ERR_RING_FULL"; case I40E_ERR_INVALID_PD_ID: return "I40E_ERR_INVALID_PD_ID"; case I40E_ERR_INVALID_QP_ID: return "I40E_ERR_INVALID_QP_ID"; case I40E_ERR_INVALID_CQ_ID: return "I40E_ERR_INVALID_CQ_ID"; case I40E_ERR_INVALID_CEQ_ID: return "I40E_ERR_INVALID_CEQ_ID"; case I40E_ERR_INVALID_AEQ_ID: return "I40E_ERR_INVALID_AEQ_ID"; case I40E_ERR_INVALID_SIZE: return "I40E_ERR_INVALID_SIZE"; case I40E_ERR_INVALID_ARP_INDEX: return "I40E_ERR_INVALID_ARP_INDEX"; case I40E_ERR_INVALID_FPM_FUNC_ID: return "I40E_ERR_INVALID_FPM_FUNC_ID"; case I40E_ERR_QP_INVALID_MSG_SIZE: return "I40E_ERR_QP_INVALID_MSG_SIZE"; case I40E_ERR_QP_TOOMANY_WRS_POSTED: return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; case I40E_ERR_INVALID_FRAG_COUNT: return "I40E_ERR_INVALID_FRAG_COUNT"; case I40E_ERR_QUEUE_EMPTY: return "I40E_ERR_QUEUE_EMPTY"; case I40E_ERR_INVALID_ALIGNMENT: return "I40E_ERR_INVALID_ALIGNMENT"; case I40E_ERR_FLUSHED_QUEUE: return "I40E_ERR_FLUSHED_QUEUE"; case I40E_ERR_INVALID_PUSH_PAGE_INDEX: return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; case I40E_ERR_INVALID_IMM_DATA_SIZE: return "I40E_ERR_INVALID_IMM_DATA_SIZE"; case I40E_ERR_TIMEOUT: return "I40E_ERR_TIMEOUT"; case I40E_ERR_OPCODE_MISMATCH: return "I40E_ERR_OPCODE_MISMATCH"; case I40E_ERR_CQP_COMPL_ERROR: return "I40E_ERR_CQP_COMPL_ERROR"; case I40E_ERR_INVALID_VF_ID: return "I40E_ERR_INVALID_VF_ID"; case I40E_ERR_INVALID_HMCFN_ID: return "I40E_ERR_INVALID_HMCFN_ID"; case I40E_ERR_BACKING_PAGE_ERROR: return "I40E_ERR_BACKING_PAGE_ERROR"; case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; case I40E_ERR_INVALID_PBLE_INDEX: return "I40E_ERR_INVALID_PBLE_INDEX"; case I40E_ERR_INVALID_SD_INDEX: return 
"I40E_ERR_INVALID_SD_INDEX"; case I40E_ERR_INVALID_PAGE_DESC_INDEX: return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; case I40E_ERR_INVALID_SD_TYPE: return "I40E_ERR_INVALID_SD_TYPE"; case I40E_ERR_MEMCPY_FAILED: return "I40E_ERR_MEMCPY_FAILED"; case I40E_ERR_INVALID_HMC_OBJ_INDEX: return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; case I40E_ERR_INVALID_HMC_OBJ_COUNT: return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; case I40E_ERR_INVALID_SRQ_ARM_LIMIT: return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; case I40E_ERR_SRQ_ENABLED: return "I40E_ERR_SRQ_ENABLED"; case I40E_ERR_ADMIN_QUEUE_ERROR: return "I40E_ERR_ADMIN_QUEUE_ERROR"; case I40E_ERR_ADMIN_QUEUE_TIMEOUT: return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; case I40E_ERR_BUF_TOO_SHORT: return "I40E_ERR_BUF_TOO_SHORT"; case I40E_ERR_ADMIN_QUEUE_FULL: return "I40E_ERR_ADMIN_QUEUE_FULL"; case I40E_ERR_ADMIN_QUEUE_NO_WORK: return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; case I40E_ERR_BAD_IWARP_CQE: return "I40E_ERR_BAD_IWARP_CQE"; case I40E_ERR_NVM_BLANK_MODE: return "I40E_ERR_NVM_BLANK_MODE"; case I40E_ERR_NOT_IMPLEMENTED: return "I40E_ERR_NOT_IMPLEMENTED"; case I40E_ERR_PE_DOORBELL_NOT_ENABLED: return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; case I40E_ERR_DIAG_TEST_FAILED: return "I40E_ERR_DIAG_TEST_FAILED"; case I40E_ERR_NOT_READY: return "I40E_ERR_NOT_READY"; case I40E_NOT_SUPPORTED: return "I40E_NOT_SUPPORTED"; case I40E_ERR_FIRMWARE_API_VERSION: return "I40E_ERR_FIRMWARE_API_VERSION"; case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); return hw->err_str; } /** * i40e_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer * @buf_len: max length of buffer * * Dumps debug log about adminq command with descriptor contents. 
**/ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u32 effective_mask = hw->debug_mask & mask; u8 *buf = (u8 *)buffer; u16 len; - u16 i = 0; + u16 i; - if ((!(mask & hw->debug_mask)) || (desc == NULL)) + if (!effective_mask || !desc) return; len = LE16_TO_CPU(aq_desc->datalen); - i40e_debug(hw, mask, + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", LE16_TO_CPU(aq_desc->opcode), LE16_TO_CPU(aq_desc->flags), LE16_TO_CPU(aq_desc->datalen), LE16_TO_CPU(aq_desc->retval)); - i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\tcookie (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->cookie_high), LE32_TO_CPU(aq_desc->cookie_low)); - i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\tparam (0,1) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->params.internal.param0), LE32_TO_CPU(aq_desc->params.internal.param1)); - i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\taddr (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(aq_desc->params.external.addr_high), LE32_TO_CPU(aq_desc->params.external.addr_low)); - if ((buffer != NULL) && (aq_desc->datalen != 0)) { + if (buffer && (buf_len != 0) && (len != 0) && + (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ for (i = 0; i < (len - 16); i += 16) i40e_debug(hw, mask, "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", i, buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7], buf[i+8], buf[i+9], buf[i+10], buf[i+11], buf[i+12], buf[i+13], buf[i+14], buf[i+15]); /* the most we could have left is 16 bytes, pad with zeros */ if (i < len) { char d_buf[16]; int j, i_sav; i_sav = i; memset(d_buf, 0, sizeof(d_buf)); for (j = 0; i < len; j++, i++) d_buf[j] = buf[i]; i40e_debug(hw, mask, "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3], d_buf[4], d_buf[5], d_buf[6], d_buf[7], d_buf[8], d_buf[9], d_buf[10], d_buf[11], d_buf[12], d_buf[13], d_buf[14], d_buf[15]); } } } /** * i40e_check_asq_alive * @hw: pointer to the hw struct * * Returns TRUE if Queue is enabled else FALSE. **/ bool i40e_check_asq_alive(struct i40e_hw *hw) { if (hw->aq.asq.len) if (!i40e_is_vf(hw)) return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); if (i40e_is_vf(hw)) return !!(rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQENABLE_MASK); return FALSE; } /** * i40e_aq_queue_shutdown * @hw: pointer to the hw struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. 
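 *
 * A minimal teardown sketch (hypothetical unload path): only tell the
 * firmware we are leaving when the send queue is still alive:
 *
 *	if (i40e_check_asq_alive(hw))
 *		i40e_aq_queue_shutdown(hw, TRUE);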
**/ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading) { struct i40e_aq_desc desc; struct i40e_aqc_queue_shutdown *cmd = (struct i40e_aqc_queue_shutdown *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); if (unloading) cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_get_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set TRUE, for VSI table set FALSE * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * @set: set TRUE to set the table, FALSE to get the table * * Internal function to get or set RSS look up table **/ static enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size, bool set) { enum i40e_status_code status; struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_lut *cmd_resp = (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; if (set) i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_rss_lut); else i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_rss_lut); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); cmd_resp->vsi_id = CPU_TO_LE16((u16)((vsi_id << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); if (pf_lut) cmd_resp->flags |= CPU_TO_LE16((u16) ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); else cmd_resp->flags |= CPU_TO_LE16((u16) ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); return status; } /** * i40e_aq_get_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set TRUE, for VSI table set FALSE * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * * get the RSS lookup table, PF or VSI type **/ enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, FALSE); } /** * i40e_aq_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set TRUE, for VSI table set FALSE * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * * set the RSS lookup table, PF or VSI type **/ enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, TRUE); } /** * i40e_aq_get_set_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * @set: set TRUE to set the key, FALSE to get the key * * get the RSS key per VSI **/ static enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key, bool set) { enum i40e_status_code status; struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_key *cmd_resp = (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); if (set) i40e_fill_default_direct_cmd_desc(&desc, 
i40e_aqc_opc_set_rss_key); else i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_rss_key); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); cmd_resp->vsi_id = CPU_TO_LE16((u16)((vsi_id << I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); return status; } /** * i40e_aq_get_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * **/ enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, FALSE); } /** * i40e_aq_set_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * * set the RSS key per VSI **/ enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, TRUE); } /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. * * Macros are used to shorten the table lines and make this table human * readable. * * We store the PTYPE in the top byte of the bit field - this is just so that * we can check that the table doesn't have a row missing, as the index into * the table should be the PTYPE. * * Typical work flow: * * IF NOT i40e_ptype_lookup[ptype].known * THEN * Packet is unknown * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP * Use the rest of the fields to look at the tunnels, inner protocols, etc * ELSE * Use the enum i40e_rx_l2_ptype to decode the packet type * ENDIF */ /* macro to make the table lines short */ #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ { PTYPE, \ 1, \ I40E_RX_PTYPE_OUTER_##OUTER_IP, \ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ I40E_RX_PTYPE_##OUTER_FRAG, \ I40E_RX_PTYPE_TUNNEL_##T, \ I40E_RX_PTYPE_TUNNEL_END_##TE, \ I40E_RX_PTYPE_##TEF, \ I40E_RX_PTYPE_INNER_PROT_##I, \ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } #define I40E_PTT_UNUSED_ENTRY(PTYPE) \ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* shorter macros makes the table fit but are terse */ #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC /* Lookup table mapping the HW PTYPE to the bit field for decoding */ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { /* L2 Packet types */ I40E_PTT_UNUSED_ENTRY(0), I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(4), I40E_PTT_UNUSED_ENTRY(5), I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(8), I40E_PTT_UNUSED_ENTRY(9), I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(17, L2, NONE, NOF, 
NONE, NONE, NOF, NONE, PAY3), I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), /* Non Tunneled IPv4 */ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(25), I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv4 --> IPv4 */ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(32), I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv4 --> IPv6 */ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(39), I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT */ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> IPv4 */ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(47), I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> IPv6 */ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(54), I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC */ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(62), I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(69), I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC/VLAN */ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* 
IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(77), I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(84), I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* Non Tunneled IPv6 */ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(91), I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv6 --> IPv4 */ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(98), I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv6 --> IPv6 */ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(105), I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT */ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> IPv4 */ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(113), I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> IPv6 */ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(120), I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC */ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(128), I40E_PTT(129, 
IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(135), I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN */ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(143), I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(150), I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* unused entries */ I40E_PTT_UNUSED_ENTRY(154), I40E_PTT_UNUSED_ENTRY(155), I40E_PTT_UNUSED_ENTRY(156), I40E_PTT_UNUSED_ENTRY(157), I40E_PTT_UNUSED_ENTRY(158), I40E_PTT_UNUSED_ENTRY(159), I40E_PTT_UNUSED_ENTRY(160), I40E_PTT_UNUSED_ENTRY(161), I40E_PTT_UNUSED_ENTRY(162), I40E_PTT_UNUSED_ENTRY(163), I40E_PTT_UNUSED_ENTRY(164), I40E_PTT_UNUSED_ENTRY(165), I40E_PTT_UNUSED_ENTRY(166), I40E_PTT_UNUSED_ENTRY(167), I40E_PTT_UNUSED_ENTRY(168), I40E_PTT_UNUSED_ENTRY(169), I40E_PTT_UNUSED_ENTRY(170), I40E_PTT_UNUSED_ENTRY(171), I40E_PTT_UNUSED_ENTRY(172), I40E_PTT_UNUSED_ENTRY(173), I40E_PTT_UNUSED_ENTRY(174), I40E_PTT_UNUSED_ENTRY(175), I40E_PTT_UNUSED_ENTRY(176), I40E_PTT_UNUSED_ENTRY(177), I40E_PTT_UNUSED_ENTRY(178), I40E_PTT_UNUSED_ENTRY(179), I40E_PTT_UNUSED_ENTRY(180), I40E_PTT_UNUSED_ENTRY(181), I40E_PTT_UNUSED_ENTRY(182), I40E_PTT_UNUSED_ENTRY(183), I40E_PTT_UNUSED_ENTRY(184), I40E_PTT_UNUSED_ENTRY(185), I40E_PTT_UNUSED_ENTRY(186), I40E_PTT_UNUSED_ENTRY(187), I40E_PTT_UNUSED_ENTRY(188), I40E_PTT_UNUSED_ENTRY(189), I40E_PTT_UNUSED_ENTRY(190), I40E_PTT_UNUSED_ENTRY(191), I40E_PTT_UNUSED_ENTRY(192), I40E_PTT_UNUSED_ENTRY(193), I40E_PTT_UNUSED_ENTRY(194), I40E_PTT_UNUSED_ENTRY(195), I40E_PTT_UNUSED_ENTRY(196), I40E_PTT_UNUSED_ENTRY(197), I40E_PTT_UNUSED_ENTRY(198), I40E_PTT_UNUSED_ENTRY(199), I40E_PTT_UNUSED_ENTRY(200), I40E_PTT_UNUSED_ENTRY(201), I40E_PTT_UNUSED_ENTRY(202), I40E_PTT_UNUSED_ENTRY(203), I40E_PTT_UNUSED_ENTRY(204), I40E_PTT_UNUSED_ENTRY(205), I40E_PTT_UNUSED_ENTRY(206), I40E_PTT_UNUSED_ENTRY(207), I40E_PTT_UNUSED_ENTRY(208), I40E_PTT_UNUSED_ENTRY(209), I40E_PTT_UNUSED_ENTRY(210), I40E_PTT_UNUSED_ENTRY(211), I40E_PTT_UNUSED_ENTRY(212), I40E_PTT_UNUSED_ENTRY(213), I40E_PTT_UNUSED_ENTRY(214), I40E_PTT_UNUSED_ENTRY(215), 
I40E_PTT_UNUSED_ENTRY(216), I40E_PTT_UNUSED_ENTRY(217), I40E_PTT_UNUSED_ENTRY(218), I40E_PTT_UNUSED_ENTRY(219), I40E_PTT_UNUSED_ENTRY(220), I40E_PTT_UNUSED_ENTRY(221), I40E_PTT_UNUSED_ENTRY(222), I40E_PTT_UNUSED_ENTRY(223), I40E_PTT_UNUSED_ENTRY(224), I40E_PTT_UNUSED_ENTRY(225), I40E_PTT_UNUSED_ENTRY(226), I40E_PTT_UNUSED_ENTRY(227), I40E_PTT_UNUSED_ENTRY(228), I40E_PTT_UNUSED_ENTRY(229), I40E_PTT_UNUSED_ENTRY(230), I40E_PTT_UNUSED_ENTRY(231), I40E_PTT_UNUSED_ENTRY(232), I40E_PTT_UNUSED_ENTRY(233), I40E_PTT_UNUSED_ENTRY(234), I40E_PTT_UNUSED_ENTRY(235), I40E_PTT_UNUSED_ENTRY(236), I40E_PTT_UNUSED_ENTRY(237), I40E_PTT_UNUSED_ENTRY(238), I40E_PTT_UNUSED_ENTRY(239), I40E_PTT_UNUSED_ENTRY(240), I40E_PTT_UNUSED_ENTRY(241), I40E_PTT_UNUSED_ENTRY(242), I40E_PTT_UNUSED_ENTRY(243), I40E_PTT_UNUSED_ENTRY(244), I40E_PTT_UNUSED_ENTRY(245), I40E_PTT_UNUSED_ENTRY(246), I40E_PTT_UNUSED_ENTRY(247), I40E_PTT_UNUSED_ENTRY(248), I40E_PTT_UNUSED_ENTRY(249), I40E_PTT_UNUSED_ENTRY(250), I40E_PTT_UNUSED_ENTRY(251), I40E_PTT_UNUSED_ENTRY(252), I40E_PTT_UNUSED_ENTRY(253), I40E_PTT_UNUSED_ENTRY(254), I40E_PTT_UNUSED_ENTRY(255) }; /** * i40e_validate_mac_addr - Validate unicast MAC address * @mac_addr: pointer to MAC address * * Tests a MAC address to ensure it is a valid Individual Address **/ enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr) { enum i40e_status_code status = I40E_SUCCESS; DEBUGFUNC("i40e_validate_mac_addr"); /* Broadcast addresses ARE multicast addresses * Make sure it is not a multicast address * Reject the zero address */ if (I40E_IS_MULTICAST(mac_addr) || (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)) status = I40E_ERR_INVALID_MAC_ADDR; return status; } /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure * * This assigns the MAC type and PHY code and inits the NVM. * Does not touch the hardware. This function must be called prior to any * other function in the shared code. The i40e_hw structure should be * memset to 0 prior to calling this function. 
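 *
 * A minimal attach-time sketch (hedged: the pci_get_* accessors stand in
 * for whatever bus glue the OS provides, and bar0_va is a placeholder for
 * the mapped BAR0; the required fields are listed just below):
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->hw_addr = bar0_va;
 *	hw->vendor_id = pci_get_vendor(dev);
 *	hw->device_id = pci_get_device(dev);
 *	hw->subsystem_vendor_id = pci_get_subvendor(dev);
 *	hw->subsystem_device_id = pci_get_subdevice(dev);
 *	hw->revision_id = pci_get_revid(dev);
 *	if (i40e_init_shared_code(hw) != I40E_SUCCESS)
 *		return (EIO);
 *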
The following fields in * hw structure should be filled in prior to calling this function: * hw_addr, back, device_id, vendor_id, subsystem_device_id, * subsystem_vendor_id, and revision_id **/ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw) { enum i40e_status_code status = I40E_SUCCESS; u32 port, ari, func_rid; DEBUGFUNC("i40e_init_shared_code"); i40e_set_mac_type(hw); switch (hw->mac.type) { case I40E_MAC_XL710: case I40E_MAC_X722: break; default: return I40E_ERR_DEVICE_NOT_SUPPORTED; } hw->phy.get_link_info = TRUE; /* Determine port number and PF number*/ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; hw->port = (u8)port; ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; func_rid = rd32(hw, I40E_PF_FUNC_RID); if (ari) hw->pf_id = (u8)(func_rid & 0xff); else hw->pf_id = (u8)(func_rid & 0x7); if (hw->mac.type == I40E_MAC_X722) hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + /* NVMUpdate features structure initialization */ + hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR; + hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR; + hw->nvmupd_features.size = sizeof(hw->nvmupd_features); + i40e_memset(hw->nvmupd_features.features, 0x0, + I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN * + sizeof(*hw->nvmupd_features.features), + I40E_NONDMA_MEM); + + /* No features supported at the moment */ + hw->nvmupd_features.features[0] = 0; + status = i40e_init_nvm(hw); return status; } /** * i40e_aq_mac_address_read - Retrieve the MAC addresses * @hw: pointer to the hw struct * @flags: a return indicator of what addresses were added to the addr store * @addrs: the requestor's mac addr store * @cmd_details: pointer to command details structure or NULL **/ static enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw, u16 *flags, struct i40e_aqc_mac_address_read_data *addrs, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_read *cmd_data = (struct i40e_aqc_mac_address_read *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, addrs, sizeof(*addrs), cmd_details); *flags = LE16_TO_CPU(cmd_data->command_flags); return status; } /** * i40e_aq_mac_address_write - Change the MAC addresses * @hw: pointer to the hw struct * @flags: indicates which MAC to be written * @mac_addr: address to write * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, u16 flags, u8 *mac_addr, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_write *cmd_data = (struct i40e_aqc_mac_address_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_write); cmd_data->command_flags = CPU_TO_LE16(flags); cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]); cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) | ((u32)mac_addr[3] << 16) | ((u32)mac_addr[4] << 8) | mac_addr[5]); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_get_mac_addr - get MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to MAC address * * Reads the adapter's MAC address from register **/ enum 
i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; enum i40e_status_code status; u16 flags = 0; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (flags & I40E_AQC_LAN_ADDR_VALID) i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac), I40E_NONDMA_TO_NONDMA); return status; } /** * i40e_get_port_mac_addr - get Port MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to Port MAC address * * Reads the adapter's Port MAC address **/ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; enum i40e_status_code status; u16 flags = 0; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (status) return status; if (flags & I40E_AQC_PORT_ADDR_VALID) i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac), I40E_NONDMA_TO_NONDMA); else status = I40E_ERR_INVALID_MAC_ADDR; return status; } /** * i40e_pre_tx_queue_cfg - pre tx queue configure * @hw: pointer to the HW structure * @queue: target pf queue index * @enable: state change request * * Handles hw requirement to indicate intention to enable * or disable target queue. **/ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) { u32 abs_queue_idx = hw->func_caps.base_queue + queue; u32 reg_block = 0; u32 reg_val; if (abs_queue_idx >= 128) { reg_block = abs_queue_idx / 128; abs_queue_idx %= 128; } reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); if (enable) reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; else reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); } /** * i40e_read_pba_string - Reads part number string from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. 
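 *
 * A minimal usage sketch (the 32-byte buffer size is an assumption, not
 * a value this interface mandates; the output is NUL-terminated, so the
 * buffer must hold two characters per PBA data word plus one):
 *
 *	u8 pba[32];
 *
 *	if (i40e_read_pba_string(hw, pba, sizeof(pba)) == I40E_SUCCESS)
 *		device_printf(dev, "PBA: %s\n", pba);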
**/ enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size) { enum i40e_status_code status = I40E_SUCCESS; u16 pba_word = 0; u16 pba_size = 0; u16 pba_ptr = 0; u16 i = 0; status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) { DEBUGOUT("Failed to read PBA flags or flag is invalid.\n"); return status; } status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); if (status != I40E_SUCCESS) { DEBUGOUT("Failed to read PBA Block pointer.\n"); return status; } status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); if (status != I40E_SUCCESS) { DEBUGOUT("Failed to read PBA Block size.\n"); return status; } /* Subtract one to get PBA word count (PBA Size word is included in * total size) */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { DEBUGOUT("Buffer too small for PBA data.\n"); return I40E_ERR_PARAM; } for (i = 0; i < pba_size; i++) { status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); if (status != I40E_SUCCESS) { DEBUGOUT1("Failed to read PBA Block word %d.\n", i); return status; } pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; pba_num[(i * 2) + 1] = pba_word & 0xFF; } pba_num[(pba_size * 2)] = '\0'; return status; } /** * i40e_get_media_type - Gets media type * @hw: pointer to the hardware structure **/ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) { enum i40e_media_type media; switch (hw->phy.link_info.phy_type) { case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: case I40E_PHY_TYPE_40GBASE_SR4: case I40E_PHY_TYPE_40GBASE_LR4: case I40E_PHY_TYPE_25GBASE_LR: case I40E_PHY_TYPE_25GBASE_SR: media = I40E_MEDIA_TYPE_FIBER; break; case I40E_PHY_TYPE_100BASE_TX: case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_2_5GBASE_T: + case I40E_PHY_TYPE_5GBASE_T: case I40E_PHY_TYPE_10GBASE_T: media = I40E_MEDIA_TYPE_BASET; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_40GBASE_CR4_CU: case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_40GBASE_AOC: case I40E_PHY_TYPE_10GBASE_AOC: case I40E_PHY_TYPE_25GBASE_CR: case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: media = I40E_MEDIA_TYPE_DA; break; case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_40GBASE_KR4: case I40E_PHY_TYPE_20GBASE_KR2: case I40E_PHY_TYPE_25GBASE_KR: media = I40E_MEDIA_TYPE_BACKPLANE; break; case I40E_PHY_TYPE_SGMII: case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_XLAUI: case I40E_PHY_TYPE_XLPPI: default: media = I40E_MEDIA_TYPE_UNKNOWN; break; } return media; } +/** + * i40e_poll_globr - Poll for Global Reset completion + * @hw: pointer to the hardware structure + * @retry_limit: how many times to retry before failure + **/ +static enum i40e_status_code i40e_poll_globr(struct i40e_hw *hw, + u32 retry_limit) +{ + u32 cnt, reg = 0; + + for (cnt = 0; cnt < retry_limit; cnt++) { + reg = rd32(hw, I40E_GLGEN_RSTAT); + if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) + return I40E_SUCCESS; + i40e_msec_delay(100); + } + + DEBUGOUT("Global reset failed.\n"); + DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg); + + return I40E_ERR_RESET_FAILED; +} + #define I40E_PF_RESET_WAIT_COUNT 200 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure * * Assuming someone else has triggered a global reset, * assure the global reset is complete and then
reset the PF **/ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw) { u32 cnt = 0; u32 cnt1 = 0; u32 reg = 0; u32 grst_del; /* Poll for Global Reset steady state in case of recent GRST. * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. */ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; - grst_del = grst_del * 20; + grst_del = min(grst_del * 20, 160U); for (cnt = 0; cnt < grst_del; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; i40e_msec_delay(100); } if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { DEBUGOUT("Global reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } /* Now Wait for the FW to be ready */ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { reg = rd32(hw, I40E_GLNVM_ULD); reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { DEBUGOUT1("Core and Global modules ready %d\n", cnt1); break; } i40e_msec_delay(10); } if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { DEBUGOUT("wait for FW Reset complete timed out\n"); DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg); return I40E_ERR_RESET_FAILED; } /* If there was a Global Reset in progress when we got here, * we don't need to do the PF Reset */ if (!cnt) { u32 reg2 = 0; reg = rd32(hw, I40E_PFGEN_CTRL); wr32(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, I40E_PFGEN_CTRL); if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; reg2 = rd32(hw, I40E_GLGEN_RSTAT); - if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { - DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n"); - DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2); - return I40E_ERR_NOT_READY; - } + if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) + break; i40e_msec_delay(1); } - if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { + if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { + if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS) + return I40E_ERR_RESET_FAILED; + } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { DEBUGOUT("PF reset polling failed to complete.\n"); return I40E_ERR_RESET_FAILED; } } i40e_clear_pxe_mode(hw); return I40E_SUCCESS; } /** * i40e_clear_hw - clear out any left over hw state * @hw: pointer to the hw struct * * Clear queues and interrupts, typically called at init time, * but after the capabilities have been found so we know how many * queues and msix vectors have been allocated.
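 *
 * A hedged ordering sketch of the constraint above (buff, buff_size and
 * needed are placeholders; only the relative order matters):
 *
 *	i40e_aq_discover_capabilities(hw, buff, buff_size, &needed,
 *	    i40e_aqc_opc_list_func_capabilities, NULL);
 *	i40e_clear_hw(hw);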
**/ void i40e_clear_hw(struct i40e_hw *hw) { u32 num_queues, base_queue; u32 num_pf_int; u32 num_vf_int; u32 num_vfs; u32 i, j; u32 val; u32 eol = 0x7ff; /* get number of interrupts, queues, and vfs */ val = rd32(hw, I40E_GLPCI_CNF2); num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; val = rd32(hw, I40E_PFLAN_QALLOC); base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> I40E_PFLAN_QALLOC_LASTQ_SHIFT; if (val & I40E_PFLAN_QALLOC_VALID_MASK) num_queues = (j - base_queue) + 1; else num_queues = 0; val = rd32(hw, I40E_PF_VT_PFALLOC); i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> I40E_PF_VT_PFALLOC_LASTVF_SHIFT; if (val & I40E_PF_VT_PFALLOC_VALID_MASK) num_vfs = (j - i) + 1; else num_vfs = 0; /* stop all the interrupts */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; for (i = 0; i < num_pf_int - 2; i++) wr32(hw, I40E_PFINT_DYN_CTLN(i), val); /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); for (i = 0; i < num_pf_int - 2; i++) wr32(hw, I40E_PFINT_LNKLSTN(i), val); val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; for (i = 0; i < num_vfs; i++) wr32(hw, I40E_VPINT_LNKLST0(i), val); for (i = 0; i < num_vf_int - 2; i++) wr32(hw, I40E_VPINT_LNKLSTN(i), val); /* warn the HW of the coming Tx disables */ for (i = 0; i < num_queues; i++) { u32 abs_queue_idx = base_queue + i; u32 reg_block = 0; if (abs_queue_idx >= 128) { reg_block = abs_queue_idx / 128; abs_queue_idx %= 128; } val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); } i40e_usec_delay(400); /* stop all the queues */ for (i = 0; i < num_queues; i++) { wr32(hw, I40E_QINT_TQCTL(i), 0); wr32(hw, I40E_QTX_ENA(i), 0); wr32(hw, I40E_QINT_RQCTL(i), 0); wr32(hw, I40E_QRX_ENA(i), 0); } /* short wait for all queue disables to settle */ i40e_usec_delay(50); } /** * i40e_clear_pxe_mode - clear pxe operations mode * @hw: pointer to the hw struct * * Make sure all PXE mode settings are cleared, including things * like descriptor fetch/write-back mode. 
**/ void i40e_clear_pxe_mode(struct i40e_hw *hw) { if (i40e_check_asq_alive(hw)) i40e_aq_clear_pxe_mode(hw, NULL); } /** * i40e_led_is_mine - helper to find matching led * @hw: pointer to the hw struct * @idx: index into GPIO registers * * returns: 0 if no match, otherwise the value of the GPIO_CTL register */ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) { u32 gpio_val = 0; u32 port; if (!hw->func_caps.led[idx]) return 0; gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; /* if PRT_NUM_NA is 1 then this LED is not port specific, OR * if it is not our port then ignore */ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || (port != hw->port)) return 0; return gpio_val; } #define I40E_COMBINED_ACTIVITY 0xA #define I40E_FILTER_ACTIVITY 0xE #define I40E_LINK_ACTIVITY 0xC #define I40E_MAC_ACTIVITY 0xD #define I40E_LED0 22 /** * i40e_led_get - return current on/off mode * @hw: pointer to the hw struct * * The value returned is the 'mode' field as defined in the * GPIO register definitions: 0x0 = off, 0xf = on, and other * values are variations of possible behaviors relating to * blink, link, and wire. **/ u32 i40e_led_get(struct i40e_hw *hw) { - u32 current_mode = 0; u32 mode = 0; int i; /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; - - /* ignore gpio LED src mode entries related to the activity - * LEDs - */ - current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) - >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); - switch (current_mode) { - case I40E_COMBINED_ACTIVITY: - case I40E_FILTER_ACTIVITY: - case I40E_MAC_ACTIVITY: - case I40E_LINK_ACTIVITY: - continue; - default: - break; - } - mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; } - return mode; } /** * i40e_led_set - set new on/off mode * @hw: pointer to the hw struct * @mode: 0=off, 0xf=on (else see manual for mode details) * @blink: TRUE if the LED should blink when on, FALSE if steady * * if this function is used to turn on the blink it should * be used to disable the blink when restoring the original state. 
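 *
 * A minimal identify-LED sketch of that save/restore pattern (0xf turns
 * the LED on; blink is enabled while identifying and turned back off on
 * restore):
 *
 *	u32 saved = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, TRUE);
 *	i40e_led_set(hw, saved, FALSE);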
**/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { - u32 current_mode = 0; int i; if (mode & 0xfffffff0) DEBUGOUT1("invalid mode passed in %X\n", mode); /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; - - /* ignore gpio LED src mode entries related to the activity - * LEDs - */ - current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) - >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); - switch (current_mode) { - case I40E_COMBINED_ACTIVITY: - case I40E_FILTER_ACTIVITY: - case I40E_MAC_ACTIVITY: - case I40E_LINK_ACTIVITY: - continue; - default: - break; - } - gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); if (blink) gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); break; } } /* Admin command wrappers */ /** * i40e_aq_get_phy_capabilities * @hw: pointer to the hw struct * @abilities: structure for PHY capabilities to be filled * @qualified_modules: report Qualified Modules * @report_init: report init capabilities (active are default) * @cmd_details: pointer to command details structure or NULL * * Returns the various PHY abilities supported on the Port. **/ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); if (!abilities) return I40E_ERR_PARAM; do { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_abilities); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (abilities_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); if (qualified_modules) desc.params.external.param0 |= CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); if (report_init) desc.params.external.param0 |= CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, cmd_details); if (status != I40E_SUCCESS) break; if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) { status = I40E_ERR_UNKNOWN_PHY; break; } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) { i40e_msec_delay(1); total_delay++; status = I40E_ERR_TIMEOUT; } } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) && (total_delay < max_delay)); if (status != I40E_SUCCESS) return status; if (report_init) { if (hw->mac.type == I40E_MAC_XL710 && hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL); } else { hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type); hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32); } } return status; } /** * i40e_aq_set_phy_config * @hw: pointer to the hw struct * @config: structure with PHY configuration to be set * @cmd_details: pointer to command details structure or NULL * * Set the various PHY configuration parameters * supported on the Port. One or more of the Set PHY config parameters may be * ignored in an MFP mode as the PF may not have the privilege to set some *
of the PHY Config parameters. This status will be indicated by the * command response. **/ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_aq_set_phy_config *config, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_phy_config *cmd = (struct i40e_aq_set_phy_config *)&desc.params.raw; enum i40e_status_code status; if (!config) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_config); *cmd = *config; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_set_fc * @hw: pointer to the hw struct * @aq_failures: buffer to return AdminQ failure information * @atomic_restart: whether to enable atomic link restart * * Set the requested flow control mode using set_phy_config. **/ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_restart) { enum i40e_fc_mode fc_mode = hw->fc.requested_mode; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; enum i40e_status_code status; u8 pause_mask = 0x0; *aq_failures = 0x0; switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; break; case I40E_FC_RX_PAUSE: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; break; case I40E_FC_TX_PAUSE: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; break; default: break; } /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities, NULL); if (status) { *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; return status; } memset(&config, 0, sizeof(config)); /* clear the old pause settings */ config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & ~(I40E_AQ_PHY_FLAG_PAUSE_RX); /* set the new abilities */ config.abilities |= pause_mask; /* If the abilities have changed, then set the new config */ if (config.abilities != abilities.abilities) { /* Auto restart link so settings take effect */ if (atomic_restart) config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* Copy over all the old settings */ config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; config.link_speed = abilities.link_speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_PHY_FEC_CONFIG_MASK; status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ status = i40e_update_link_info(hw); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ i40e_msec_delay(1000); status = i40e_update_link_info(hw); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; return status; } /** * i40e_aq_set_mac_config * @hw: pointer to the hw struct * @max_frame_size: Maximum Frame Size to be supported by the port * @crc_en: Tell HW to append a CRC to outgoing frames * @pacing: Pacing configurations * @cmd_details: pointer to command details structure or NULL * * Configure MAC settings for frame size, jumbo frame support and the * addition of a CRC by the hardware. 
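 *
 * Illustrative call (editor's sketch, not part of the driver): a PF
 * enabling 9728-byte jumbo frames with hardware CRC and no pacing
 * might issue
 *
 *	status = i40e_aq_set_mac_config(hw, 9728, TRUE, 0, NULL);
 *
 * The frame size and the zero pacing value are example choices for
 * illustration, not values mandated by this code.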
**/ enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size, bool crc_en, u16 pacing, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_mac_config *cmd = (struct i40e_aq_set_mac_config *)&desc.params.raw; enum i40e_status_code status; if (max_frame_size == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_mac_config); cmd->max_frame_size = CPU_TO_LE16(max_frame_size); cmd->params = ((u8)pacing & 0x0F) << 3; if (crc_en) cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN; +#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF + cmd->fc_refresh_threshold = + CPU_TO_LE16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_clear_pxe_mode * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Tell the firmware that the driver is taking over from PXE **/ enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { enum i40e_status_code status; struct i40e_aq_desc desc; struct i40e_aqc_clear_pxe *cmd = (struct i40e_aqc_clear_pxe *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_clear_pxe_mode); cmd->rx_cnt = 0x2; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); wr32(hw, I40E_GLLAN_RCTL_0, 0x1); return status; } /** * i40e_aq_set_link_restart_an * @hw: pointer to the hw struct * @enable_link: if TRUE: enable link, if FALSE: disable link * @cmd_details: pointer to command details structure or NULL * * Sets up the link and restarts the Auto-Negotiation over the link. **/ enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, bool enable_link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_link_restart_an *cmd = (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_link_restart_an); cmd->command = I40E_AQ_PHY_RESTART_AN; if (enable_link) cmd->command |= I40E_AQ_PHY_LINK_ENABLE; else cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_link_info * @hw: pointer to the hw struct * @enable_lse: enable/disable LinkStatusEvent reporting * @link: pointer to link status structure - optional * @cmd_details: pointer to command details structure or NULL * * Returns the link status of the adapter. 
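 *
 * Minimal usage sketch (illustrative only):
 *
 *	struct i40e_link_status link;
 *	if (i40e_aq_get_link_info(hw, FALSE, &link, NULL) == I40E_SUCCESS &&
 *	    (link.link_info & I40E_AQ_LINK_UP))
 *		; /* link is up at link.link_speed */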
**/ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_link_status *resp = (struct i40e_aqc_get_link_status *)&desc.params.raw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; enum i40e_status_code status; bool tx_pause, rx_pause; u16 command_flags; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); if (enable_lse) command_flags = I40E_AQ_LSE_ENABLE; else command_flags = I40E_AQ_LSE_DISABLE; resp->command_flags = CPU_TO_LE16(command_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_link_info_exit; /* save off old link status information */ i40e_memcpy(&hw->phy.link_info_old, hw_link_info, sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; hw->phy.media_type = i40e_get_media_type(hw); hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; hw_link_info->link_info = resp->link_info; hw_link_info->an_info = resp->an_info; hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | I40E_AQ_CONFIG_FEC_RS_ENA); hw_link_info->ext_info = resp->ext_info; hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size); hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; /* update fc info */ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); if (tx_pause & rx_pause) hw->fc.current_mode = I40E_FC_FULL; else if (tx_pause) hw->fc.current_mode = I40E_FC_TX_PAUSE; else if (rx_pause) hw->fc.current_mode = I40E_FC_RX_PAUSE; else hw->fc.current_mode = I40E_FC_NONE; if (resp->config & I40E_AQ_CONFIG_CRC_ENA) hw_link_info->crc_enable = TRUE; else hw_link_info->crc_enable = FALSE; if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED)) hw_link_info->lse_enable = TRUE; else hw_link_info->lse_enable = FALSE; if ((hw->mac.type == I40E_MAC_XL710) && (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; - if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= 7) { + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { __le32 tmp; i40e_memcpy(&tmp, resp->link_type, sizeof(tmp), I40E_NONDMA_TO_NONDMA); hw->phy.phy_types = LE32_TO_CPU(tmp); hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); } /* save link status information */ if (link) i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); /* flag cleared so helper functions don't call AQ again */ hw->phy.get_link_info = FALSE; aq_get_link_info_exit: return status; } /** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct * @mask: interrupt mask to be set * @cmd_details: pointer to command details structure or NULL * * Set link interrupt mask. 
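 *
 * Example (editor's sketch): to suppress every PHY event except link
 * up/down reports, a caller might follow the pattern used by the Linux
 * i40e driver and pass the complement of the event bit:
 *
 *	status = i40e_aq_set_phy_int_mask(hw,
 *	    (u16)~I40E_AQ_EVENT_LINK_UPDOWN, NULL);
 *
 * I40E_AQ_EVENT_LINK_UPDOWN is defined with the other PHY event bits
 * in the admin queue command definitions.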
**/ enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_int_mask *cmd = (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_int_mask); cmd->event_mask = CPU_TO_LE16(mask); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_local_advt_reg * @hw: pointer to the hw struct * @advt_reg: local AN advertisement register value * @cmd_details: pointer to command details structure or NULL * * Get the Local AN advertisement register value. **/ enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *resp = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_local_advt_reg); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_local_advt_reg_exit; *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); aq_get_local_advt_reg_exit: return status; } /** * i40e_aq_set_local_advt_reg * @hw: pointer to the hw struct * @advt_reg: local AN advertisement register value * @cmd_details: pointer to command details structure or NULL * * Set the Local AN advertisement register value. **/ enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *cmd = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_local_advt_reg); cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg)); cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg)); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_partner_advt * @hw: pointer to the hw struct * @advt_reg: AN partner advertisement register value * @cmd_details: pointer to command details structure or NULL * * Get the link partner AN advertisement register value. **/ enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_an_advt_reg *resp = (struct i40e_aqc_an_advt_reg *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_partner_advt); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status != I40E_SUCCESS) goto aq_get_partner_advt_exit; *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); aq_get_partner_advt_exit: return status; } /** * i40e_aq_set_lb_modes * @hw: pointer to the hw struct * @lb_level: loopback level to be set * @lb_type: loopback type to be set * @speed: speed to force, or 0 to leave the link speed unchanged * @cmd_details: pointer to command details structure or NULL * * Sets loopback modes.
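 *
 * Illustrative call (editor's sketch): the lb_level/lb_type encodings
 * are firmware-defined, so the arguments below are placeholders only.
 * Passing a nonzero speed also sets cmd->force_speed, as the function
 * body shows:
 *
 *	status = i40e_aq_set_lb_modes(hw, lb_level, lb_type, 0, NULL);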
**/ enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_lb_mode *cmd = (struct i40e_aqc_set_lb_mode *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes); cmd->lb_level = lb_level; cmd->lb_type = lb_type; cmd->speed = speed; if (speed) cmd->force_speed = 1; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_phy_debug * @hw: pointer to the hw struct * @cmd_flags: debug command flags * @cmd_details: pointer to command details structure or NULL * * Reset the external PHY. **/ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_debug *cmd = (struct i40e_aqc_set_phy_debug *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_debug); cmd->command_flags = cmd_flags; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Add a VSI context to the hardware. **/ enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vsi); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid); cmd->connection_type = vsi_ctx->connection_type; cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); if (status != I40E_SUCCESS) goto aq_add_vsi_exit; vsi_ctx->seid = LE16_TO_CPU(resp->seid); vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); aq_add_vsi_exit: return status; } /** * i40e_aq_set_default_vsi * @hw: pointer to the hw struct * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_clear_default_vsi * @hw: pointer to the hw struct * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct 
i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); cmd->promiscuous_flags = CPU_TO_LE16(0); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_unicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set unicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc **/ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details, bool rx_only_promisc) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; if (rx_only_promisc && (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || (hw->aq.api_maj_ver > 1))) flags |= I40E_AQC_SET_VSI_PROMISC_TX; } cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || (hw->aq.api_maj_ver > 1)) cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_multicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set multicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_full_promiscuous * @hw: pointer to the hw struct * @seid: VSI number * @set: set promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags = I40E_AQC_SET_VSI_PROMISC_UNICAST | I40E_AQC_SET_VSI_PROMISC_MULTICAST | I40E_AQC_SET_VSI_PROMISC_BROADCAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST | I40E_AQC_SET_VSI_PROMISC_MULTICAST | I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = 
CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_mc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = CPU_TO_LE16(seid); cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_uc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); cmd->seid = CPU_TO_LE16(seid); cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_bc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set broadcast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = CPU_TO_LE16(seid); cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_broadcast * @hw: pointer to the hw struct * @seid: vsi number * @set_filter: TRUE to set filter, FALSE to clear filter * @cmd_details: pointer to command details structure or NULL * * Set or clear the
broadcast promiscuous flag (filter) for a given VSI. **/ enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 seid, bool set_filter, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set_filter) cmd->promiscuous_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); else cmd->promiscuous_flags &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting * @hw: pointer to the hw struct * @seid: vsi number * @enable: enable/disable VLAN promiscuous mode for the VSI * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, u16 seid, bool enable, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; enum i40e_status_code status; u16 flags = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; cmd->promiscuous_flags = CPU_TO_LE16(flags); cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_vsi_parameters); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); if (status != I40E_SUCCESS) goto aq_get_vsi_params_exit; vsi_ctx->seid = LE16_TO_CPU(resp->seid); vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); aq_get_vsi_params_exit: return status; } /** * i40e_aq_update_vsi_params * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Update a VSI context.
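 *
 * Typical read-modify-write flow (editor's sketch; vsi_seid is a
 * placeholder and the VLAN section flag is just one example of a
 * section to touch):
 *
 *	struct i40e_vsi_context ctx = { 0 };
 *	ctx.seid = vsi_seid;
 *	status = i40e_aq_get_vsi_params(hw, &ctx, NULL);
 *	if (status == I40E_SUCCESS) {
 *		ctx.info.valid_sections |=
 *		    CPU_TO_LE16(I40E_AQ_VSI_PROP_VLAN_VALID);
 *		status = i40e_aq_update_vsi_params(hw, &ctx, NULL);
 *	}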
**/ enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_vsi_parameters); cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); return status; } /** * i40e_aq_get_switch_config * @hw: pointer to the hardware structure * @buf: pointer to the result buffer * @buf_size: length of input buffer * @start_seid: seid to start for the report, 0 == beginning * @cmd_details: pointer to command details structure or NULL * * Fill the buf with switch configuration returned from AdminQ command **/ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *scfg = (struct i40e_aqc_switch_seid *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_config); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); scfg->seid = CPU_TO_LE16(*start_seid); status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); *start_seid = LE16_TO_CPU(scfg->seid); return status; } /** * i40e_aq_set_switch_config * @hw: pointer to the hardware structure * @flags: bit flag values to set * @mode: cloud filter mode * @valid_flags: which bit flags to set * @cmd_details: pointer to command details structure or NULL * * Set switch configuration bits **/ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, u16 flags, u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_switch_config *scfg = (struct i40e_aqc_set_switch_config *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_switch_config); scfg->flags = CPU_TO_LE16(flags); scfg->valid_flags = CPU_TO_LE16(valid_flags); scfg->mode = mode; if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { scfg->switch_tag = CPU_TO_LE16(hw->switch_tag); scfg->first_tag = CPU_TO_LE16(hw->first_tag); scfg->second_tag = CPU_TO_LE16(hw->second_tag); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_firmware_version * @hw: pointer to the hw struct * @fw_major_version: firmware major version * @fw_minor_version: firmware minor version * @fw_build: firmware build number * @api_major_version: major queue version * @api_minor_version: minor queue version * @cmd_details: pointer to command details structure or NULL * * Get the firmware version from the admin queue commands **/ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc 
desc; struct i40e_aqc_get_version *resp = (struct i40e_aqc_get_version *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) { if (fw_major_version != NULL) *fw_major_version = LE16_TO_CPU(resp->fw_major); if (fw_minor_version != NULL) *fw_minor_version = LE16_TO_CPU(resp->fw_minor); if (fw_build != NULL) *fw_build = LE32_TO_CPU(resp->fw_build); if (api_major_version != NULL) *api_major_version = LE16_TO_CPU(resp->api_major); if (api_minor_version != NULL) *api_minor_version = LE16_TO_CPU(resp->api_minor); /* A workaround to fix the API version in SW */ if (api_major_version && api_minor_version && fw_major_version && fw_minor_version && ((*api_major_version == 1) && (*api_minor_version == 1)) && (((*fw_major_version == 4) && (*fw_minor_version >= 2)) || (*fw_major_version > 4))) *api_minor_version = 2; } return status; } /** * i40e_aq_send_driver_version * @hw: pointer to the hw struct * @dv: driver's major, minor version * @cmd_details: pointer to command details structure or NULL * * Send the driver version to the firmware **/ enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_driver_version *cmd = (struct i40e_aqc_driver_version *)&desc.params.raw; enum i40e_status_code status; u16 len; if (dv == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; cmd->driver_subbuild_ver = dv->subbuild_version; len = 0; while (len < sizeof(dv->driver_string) && (dv->driver_string[len] < 0x80) && dv->driver_string[len]) len++; status = i40e_asq_send_command(hw, &desc, dv->driver_string, len, cmd_details); return status; } /** * i40e_get_link_status - get status of the HW network link * @hw: pointer to the hw struct * @link_up: pointer to bool (TRUE/FALSE = linkup/linkdown) * * Variable link_up TRUE if link is up, FALSE if link is down. 
* The variable link_up is invalid if the returned value of status != I40E_SUCCESS * * Side effect: LinkStatusEvent reporting becomes enabled **/ enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up) { enum i40e_status_code status = I40E_SUCCESS; if (hw->phy.get_link_info) { status = i40e_update_link_info(hw); if (status != I40E_SUCCESS) i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", status); } *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; return status; } /** * i40e_update_link_info - update status of the HW network link * @hw: pointer to the hw struct **/ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; enum i40e_status_code status = I40E_SUCCESS; status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL); if (status) return status; /* extra checking needed to ensure link info to user is timely */ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (status) return status; hw->phy.link_info.req_fec_info = abilities.fec_cfg_curr_mod_ext_info & (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS); i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type, sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA); } return status; } /** * i40e_get_link_speed * @hw: pointer to the hw struct * * Returns the link speed of the adapter. **/ enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw) { enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN; enum i40e_status_code status = I40E_SUCCESS; if (hw->phy.get_link_info) { status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL); if (status != I40E_SUCCESS) goto i40e_link_speed_exit; } speed = hw->phy.link_info.link_speed; i40e_link_speed_exit: return speed; } /** * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC * @hw: pointer to the hw struct * @uplink_seid: the MAC or other gizmo SEID * @downlink_seid: the VSI SEID * @enabled_tc: bitmap of TCs to be enabled * @default_port: TRUE for default port VSI, FALSE for control port * @veb_seid: pointer to where to put the resulting VEB SEID * @enable_stats: TRUE to turn on VEB stats * @cmd_details: pointer to command details structure or NULL * * This asks the FW to add a VEB between the uplink and downlink * elements. If the uplink SEID is 0, this will be a floating VEB.
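 *
 * Illustrative call (editor's sketch): adding a data-port VEB with
 * statistics enabled and TC0 only, between placeholder SEIDs mac_seid
 * and vsi_seid:
 *
 *	u16 veb_seid;
 *	status = i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1,
 *	    FALSE, &veb_seid, TRUE, NULL);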
**/ enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, bool default_port, u16 *veb_seid, bool enable_stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_veb *cmd = (struct i40e_aqc_add_veb *)&desc.params.raw; struct i40e_aqc_add_veb_completion *resp = (struct i40e_aqc_add_veb_completion *)&desc.params.raw; enum i40e_status_code status; u16 veb_flags = 0; /* SEIDs need to either both be set or both be 0 for floating VEB */ if (!!uplink_seid != !!downlink_seid) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); cmd->uplink_seid = CPU_TO_LE16(uplink_seid); cmd->downlink_seid = CPU_TO_LE16(downlink_seid); cmd->enable_tcs = enabled_tc; if (!uplink_seid) veb_flags |= I40E_AQC_ADD_VEB_FLOATING; if (default_port) veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; else veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; /* reverse logic here: set the bitflag to disable the stats */ if (!enable_stats) veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; cmd->veb_flags = CPU_TO_LE16(veb_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && veb_seid) *veb_seid = LE16_TO_CPU(resp->veb_seid); return status; } /** * i40e_aq_get_veb_parameters - Retrieve VEB parameters * @hw: pointer to the hw struct * @veb_seid: the SEID of the VEB to query * @switch_id: the uplink switch id * @floating: set to TRUE if the VEB is floating * @statistic_index: index of the stats counter block for this VEB * @vebs_used: number of VEB's used by function * @vebs_free: total VEB's not reserved by any function * @cmd_details: pointer to command details structure or NULL * * This retrieves the parameters for a particular VEB, specified by * uplink_seid, and returns them to the caller. 
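 *
 * Usage sketch (illustrative), querying a VEB created earlier:
 *
 *	u16 switch_id, stat_idx, vebs_used, vebs_free;
 *	bool floating;
 *	status = i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id,
 *	    &floating, &stat_idx, &vebs_used, &vebs_free, NULL);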
**/ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, u16 *statistic_index, u16 *vebs_used, u16 *vebs_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_veb_parameters_completion *cmd_resp = (struct i40e_aqc_get_veb_parameters_completion *) &desc.params.raw; enum i40e_status_code status; if (veb_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_veb_parameters); cmd_resp->seid = CPU_TO_LE16(veb_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status) goto get_veb_exit; if (switch_id) *switch_id = LE16_TO_CPU(cmd_resp->switch_id); if (statistic_index) *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index); if (vebs_used) *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used); if (vebs_free) *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free); if (floating) { u16 flags = LE16_TO_CPU(cmd_resp->veb_flags); if (flags & I40E_AQC_ADD_VEB_FLOATING) *floating = TRUE; else *floating = FALSE; } get_veb_exit: return status; } /** * i40e_aq_add_macvlan * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be added * @count: length of the list * @cmd_details: pointer to command details structure or NULL * * Add MAC/VLAN addresses to the HW filtering **/ enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; int i; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; for (i = 0; i < count; i++) if (I40E_IS_MULTICAST(mv_list[i].mac_addr)) mv_list[i].flags |= CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, cmd_details); return status; } /** * i40e_aq_remove_macvlan * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be removed * @count: length of the list * @cmd_details: pointer to command details structure or NULL * * Remove MAC/VLAN addresses from the HW filtering **/ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = 
i40e_asq_send_command(hw, &desc, mv_list, buf_size, cmd_details); return status; } /** * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule * @hw: pointer to the hw struct * @opcode: AQ opcode for add or delete mirror rule * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @id: Destination VSI SEID or Rule ID * @count: length of the list * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for * VEBs/VEPA elements only **/ static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw, u16 opcode, u16 sw_seid, u16 rule_type, u16 id, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rule_id, u16 *rules_used, u16 *rules_free) { struct i40e_aq_desc desc; struct i40e_aqc_add_delete_mirror_rule *cmd = (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; struct i40e_aqc_add_delete_mirror_rule_completion *resp = (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; buf_size = count * sizeof(*mr_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, opcode); cmd->seid = CPU_TO_LE16(sw_seid); cmd->rule_type = CPU_TO_LE16(rule_type & I40E_AQC_MIRROR_RULE_TYPE_MASK); cmd->num_entries = CPU_TO_LE16(count); /* Dest VSI for add, rule_id for delete */ cmd->destination = CPU_TO_LE16(id); if (mr_list) { desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); } status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, cmd_details); if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { if (rule_id) *rule_id = LE16_TO_CPU(resp->rule_id); if (rules_used) *rules_used = LE16_TO_CPU(resp->mirror_rules_used); if (rules_free) *rules_free = LE16_TO_CPU(resp->mirror_rules_free); } return status; } /** * i40e_aq_add_mirrorrule - add a mirror rule * @hw: pointer to the hw struct * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @dest_vsi: SEID of VSI to which packets will be mirrored * @count: length of the list * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Add mirror rule. 
Mirror rules are supported for VEBs or VEPA elements only **/ enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rule_id, u16 *rules_used, u16 *rules_free) { if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { if (count == 0 || !mr_list) return I40E_ERR_PARAM; } return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, rule_type, dest_vsi, count, mr_list, cmd_details, rule_id, rules_used, rules_free); } /** * i40e_aq_delete_mirrorrule - delete a mirror rule * @hw: pointer to the hw struct * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @count: length of the list * @rule_id: Rule ID that is returned in the receive desc as part of * add_mirrorrule. * @mr_list: list of mirrored VLAN IDs to be removed * @cmd_details: pointer to command details structure or NULL * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only **/ enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rules_used, u16 *rules_free) { /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { /* count and mr_list shall be valid for rule_type INGRESS VLAN * mirroring. For other rule_type, count and rule_type should * not matter. */ if (count == 0 || !mr_list) return I40E_ERR_PARAM; } return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, rule_type, rule_id, count, mr_list, cmd_details, NULL, rules_used, rules_free); } /** * i40e_aq_add_vlan - Add VLAN ids to the HW filtering * @hw: pointer to the hw struct * @seid: VSI for the vlan filters * @v_list: list of vlan filters to be added * @count: length of the list * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !v_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*v_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, v_list, buf_size, cmd_details); return status; } /** * i40e_aq_remove_vlan - Remove VLANs from the HW filtering * @hw: pointer to the hw struct * @seid: VSI for the vlan filters * @v_list: list of macvlans to be removed * @count: length of the list * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct 
i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; enum i40e_status_code status; u16 buf_size; if (count == 0 || !v_list || !hw) return I40E_ERR_PARAM; buf_size = count * sizeof(*v_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan); cmd->num_addresses = CPU_TO_LE16(count); cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, v_list, buf_size, cmd_details); return status; } /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: vf id to send msg * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details * * send msg to vf **/ enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_pf_vf_message *cmd = (struct i40e_aqc_pf_vf_message *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); cmd->id = CPU_TO_LE32(vfid); desc.cookie_high = CPU_TO_LE32(v_opcode); desc.cookie_low = CPU_TO_LE32(v_retval); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); if (msglen) { desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(msglen); } status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); return status; } /** * i40e_aq_debug_read_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Read the register using the admin queue commands **/ enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd_resp = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; enum i40e_status_code status; if (reg_val == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); cmd_resp->address = CPU_TO_LE32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) { *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) | (u64)LE32_TO_CPU(cmd_resp->value_low); } return status; } /** * i40e_aq_debug_write_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Write to a register using the admin queue commands **/ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, u32 reg_addr, u64 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); cmd->address = CPU_TO_LE32(reg_addr); cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32)); cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF)); status = i40e_asq_send_command(hw, &desc, 
NULL, 0, cmd_details); return status; } /** * i40e_aq_request_resource * @hw: pointer to the hw struct * @resource: resource id * @access: access type * @sdp_number: resource number * @timeout: the maximum time in ms that the driver may hold the resource * @cmd_details: pointer to command details structure or NULL * * requests common resource using the admin queue commands **/ enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, enum i40e_aq_resource_access_type access, u8 sdp_number, u64 *timeout, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd_resp = (struct i40e_aqc_request_resource *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_request_resource"); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); cmd_resp->resource_id = CPU_TO_LE16(resource); cmd_resp->access_type = CPU_TO_LE16(access); cmd_resp->resource_number = CPU_TO_LE32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. * If the resource is held by someone else, the command completes with * busy return value and the timeout field indicates the maximum time * the current owner of the resource has to free it. */ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) *timeout = LE32_TO_CPU(cmd_resp->timeout); return status; } /** * i40e_aq_release_resource * @hw: pointer to the hw struct * @resource: resource id * @sdp_number: resource number * @cmd_details: pointer to command details structure or NULL * * release common resource using the admin queue commands **/ enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, u8 sdp_number, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd = (struct i40e_aqc_request_resource *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_release_resource"); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); cmd->resource_id = CPU_TO_LE16(resource); cmd->resource_number = CPU_TO_LE32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_read_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be read (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Read the NVM using the admin queue commands **/ enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_read_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_read_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); /* If this is the last command in a series, set the proper flag. 
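 * Reads that span several AQ commands should set it only on the
 * final chunk, so firmware can treat the series as complete; earlier
 * chunks leave the flag clear.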
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); i40e_aq_read_nvm_exit: return status; } /** * i40e_aq_read_nvm_config - read an nvm config block * @hw: pointer to the hw struct * @cmd_flags: NVM access admin command bits * @field_id: field or feature id * @data: buffer for result * @buf_size: buffer size * @element_count: pointer to count of elements read by FW * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, u8 cmd_flags, u32 field_id, void *data, u16 buf_size, u16 *element_count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_config_read *cmd = (struct i40e_aqc_nvm_config_read *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->cmd_flags = CPU_TO_LE16(cmd_flags); cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id)); if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK) cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16)); else cmd->element_id_msw = 0; status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); if (!status && element_count) *element_count = LE16_TO_CPU(cmd->element_count); return status; } /** * i40e_aq_write_nvm_config - write an nvm config block * @hw: pointer to the hw struct * @cmd_flags: NVM access admin command bits * @data: buffer for result * @buf_size: buffer size * @element_count: count of elements to be written * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, u8 cmd_flags, void *data, u16 buf_size, u16 element_count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_config_write *cmd = (struct i40e_aqc_nvm_config_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->element_count = CPU_TO_LE16(element_count); cmd->cmd_flags = CPU_TO_LE16(cmd_flags); status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); return status; } /** * i40e_aq_oem_post_update - triggers an OEM specific flow after update * @hw: pointer to the hw struct * @buff: buffer for result * @buff_size: buffer size * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH) status = I40E_ERR_NOT_IMPLEMENTED; return status; } /** * i40e_aq_erase_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in the 
module (expressed in 4 KB from module's beginning) * @length: length of the section to be erased (expressed in 4 KB) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Erase the NVM sector using the admin queue commands **/ enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_erase_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_erase_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); i40e_aq_erase_nvm_exit: return status; } /** * i40e_parse_discover_capabilities * @hw: pointer to the hw struct * @buff: pointer to a buffer containing device/function capability records * @cap_count: number of capability records in the list * @list_type_opc: type of capabilities list to parse * * Parse the device/function capabilities list. **/ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 cap_count, enum i40e_admin_queue_opc list_type_opc) { struct i40e_aqc_list_capabilities_element_resp *cap; u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; enum i40e_status_code status; u16 id, ocp_cfg_word0; u8 major_rev; u32 i = 0; cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) p = (struct i40e_hw_capabilities *)&hw->dev_caps; else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) p = (struct i40e_hw_capabilities *)&hw->func_caps; else return; for (i = 0; i < cap_count; i++, cap++) { id = LE16_TO_CPU(cap->id); number = LE32_TO_CPU(cap->number); logical_id = LE32_TO_CPU(cap->logical_id); phys_id = LE32_TO_CPU(cap->phys_id); major_rev = cap->major_rev; switch (id) { case I40E_AQ_CAP_ID_SWITCH_MODE: p->switch_mode = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Switch mode = %d\n", p->switch_mode); break; case I40E_AQ_CAP_ID_MNG_MODE: p->management_mode = number; if (major_rev > 1) { p->mng_protocols_over_mctp = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Protocols over MCTP = %d\n", p->mng_protocols_over_mctp); } else { p->mng_protocols_over_mctp = 0; } i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Management Mode = %d\n", p->management_mode); break; case I40E_AQ_CAP_ID_NPAR_ACTIVE: p->npar_enable = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: NPAR enable = %d\n", p->npar_enable); break; case I40E_AQ_CAP_ID_OS2BMC_CAP: p->os2bmc = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: OS2BMC = %d\n", p->os2bmc); break; case I40E_AQ_CAP_ID_FUNCTIONS_VALID: p->valid_functions = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Valid Functions = %d\n", p->valid_functions); break; case I40E_AQ_CAP_ID_SRIOV: if (number == 1) p->sr_iov_1_1 = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: SR-IOV = %d\n", p->sr_iov_1_1); break; case I40E_AQ_CAP_ID_VF: p->num_vfs = number; 
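/* for the VF capability, "number" carries the VF count and
 * "logical_id" the first VF id; both are saved below
 */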
p->vf_base_id = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: VF count = %d\n", p->num_vfs); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: VF base_id = %d\n", p->vf_base_id); break; case I40E_AQ_CAP_ID_VMDQ: if (number == 1) p->vmdq = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: VMDQ = %d\n", p->vmdq); break; case I40E_AQ_CAP_ID_8021QBG: if (number == 1) p->evb_802_1_qbg = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: 802.1Qbg = %d\n", number); break; case I40E_AQ_CAP_ID_8021QBR: if (number == 1) p->evb_802_1_qbh = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: 802.1Qbh = %d\n", number); break; case I40E_AQ_CAP_ID_VSI: p->num_vsis = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: VSI count = %d\n", p->num_vsis); break; case I40E_AQ_CAP_ID_DCB: if (number == 1) { p->dcb = TRUE; p->enabled_tcmap = logical_id; p->maxtc = phys_id; } i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: DCB = %d\n", p->dcb); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: TC Mapping = %d\n", logical_id); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: TC Max = %d\n", p->maxtc); break; case I40E_AQ_CAP_ID_FCOE: if (number == 1) p->fcoe = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: FCOE = %d\n", p->fcoe); break; case I40E_AQ_CAP_ID_ISCSI: if (number == 1) p->iscsi = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: iSCSI = %d\n", p->iscsi); break; case I40E_AQ_CAP_ID_RSS: p->rss = TRUE; p->rss_table_size = number; p->rss_table_entry_width = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: RSS = %d\n", p->rss); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: RSS table size = %d\n", p->rss_table_size); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: RSS table width = %d\n", p->rss_table_entry_width); break; case I40E_AQ_CAP_ID_RXQ: p->num_rx_qp = number; p->base_queue = phys_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Rx QP = %d\n", number); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: base_queue = %d\n", p->base_queue); break; case I40E_AQ_CAP_ID_TXQ: p->num_tx_qp = number; p->base_queue = phys_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Tx QP = %d\n", number); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: base_queue = %d\n", p->base_queue); break; case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MSIX vector count = %d\n", p->num_msix_vectors); break; case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MSIX VF vector count = %d\n", p->num_msix_vectors_vf); break; case I40E_AQ_CAP_ID_FLEX10: if (major_rev == 1) { if (number == 1) { p->flex10_enable = TRUE; p->flex10_capable = TRUE; } } else { /* Capability revision >= 2 */ if (number & 1) p->flex10_enable = TRUE; if (number & 2) p->flex10_capable = TRUE; } p->flex10_mode = logical_id; p->flex10_status = phys_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Flex10 mode = %d\n", p->flex10_mode); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Flex10 status = %d\n", p->flex10_status); break; case I40E_AQ_CAP_ID_CEM: if (number == 1) p->mgmt_cem = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: CEM = %d\n", p->mgmt_cem); break; case I40E_AQ_CAP_ID_IWARP: if (number == 1) p->iwarp = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: iWARP = %d\n", p->iwarp); break; case I40E_AQ_CAP_ID_LED: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->led[phys_id] = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: LED - PIN %d\n", phys_id); break; case I40E_AQ_CAP_ID_SDP: if 
(phys_id < I40E_HW_CAP_MAX_GPIO) p->sdp[phys_id] = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: SDP - PIN %d\n", phys_id); break; case I40E_AQ_CAP_ID_MDIO: if (number == 1) { p->mdio_port_num = phys_id; p->mdio_port_mode = logical_id; } i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MDIO port number = %d\n", p->mdio_port_num); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MDIO port mode = %d\n", p->mdio_port_mode); break; case I40E_AQ_CAP_ID_1588: if (number == 1) p->ieee_1588 = TRUE; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: IEEE 1588 = %d\n", p->ieee_1588); break; case I40E_AQ_CAP_ID_FLOW_DIRECTOR: p->fd = TRUE; p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Flow Director = 1\n"); i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Guaranteed FD filters = %d\n", p->fd_filters_guaranteed); break; case I40E_AQ_CAP_ID_WSR_PROT: p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: wr_csr_prot = 0x%llX\n\n", (p->wr_csr_prot & 0xffff)); break; case I40E_AQ_CAP_ID_NVM_MGMT: if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) p->sec_rev_disabled = TRUE; if (number & I40E_NVM_MGMT_UPDATE_DISABLED) p->update_disabled = TRUE; break; case I40E_AQ_CAP_ID_WOL_AND_PROXY: hw->num_wol_proxy_filters = (u16)number; hw->wol_proxy_vsi_seid = (u16)logical_id; p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK; if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK) p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK; else p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL; p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: WOL proxy filters = %d\n", hw->num_wol_proxy_filters); break; default: break; } } if (p->fcoe) i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */ p->fcoe = FALSE; /* count the enabled ports (aka the "not disabled" ports) */ hw->num_ports = 0; for (i = 0; i < 4; i++) { u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); u64 port_cfg = 0; /* use AQ read to get the physical register offset instead * of the port relative offset */ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) hw->num_ports++; } /* OCP cards case: if a mezz is removed the ethernet port is at * disabled state in PRTGEN_CNF register. Additional NVM read is * needed in order to check if we are dealing with OCP card. * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting * physical ports results in wrong partition id calculation and thus * not supporting WoL. 
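 * The X722-only check below takes the NVM semaphore, reads the single
 * word at I40E_SR_OCP_CFG_WORD0 inside the EMP module (the "2 *" in the
 * i40e_aq_read_nvm() call converts the word index into a byte offset),
 * and forces num_ports to 4 when the I40E_SR_OCP_ENABLED bit is set.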
*/ if (hw->mac.type == I40E_MAC_X722) { if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) { status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 2 * I40E_SR_OCP_CFG_WORD0, sizeof(ocp_cfg_word0), &ocp_cfg_word0, TRUE, NULL); if (status == I40E_SUCCESS && (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) hw->num_ports = 4; i40e_release_nvm(hw); } } valid_functions = p->valid_functions; num_functions = 0; while (valid_functions) { if (valid_functions & 1) num_functions++; valid_functions >>= 1; } /* partition id is 1-based, and functions are evenly spread * across the ports as partitions */ if (hw->num_ports != 0) { hw->partition_id = (hw->pf_id / hw->num_ports) + 1; hw->num_partitions = num_functions / hw->num_ports; } /* additional HW specific goodies that might * someday be HW version specific */ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; } /** * i40e_aq_discover_capabilities * @hw: pointer to the hw struct * @buff: a virtual buffer to hold the capabilities * @buff_size: Size of the virtual buffer * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM * @list_type_opc: capabilities type to discover - pass in the command opcode * @cmd_details: pointer to command details structure or NULL * * Get the device capabilities descriptions from the firmware **/ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, void *buff, u16 buff_size, u16 *data_size, enum i40e_admin_queue_opc list_type_opc, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_list_capabilites *cmd; struct i40e_aq_desc desc; enum i40e_status_code status = I40E_SUCCESS; cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; if (list_type_opc != i40e_aqc_opc_list_func_capabilities && list_type_opc != i40e_aqc_opc_list_dev_capabilities) { status = I40E_ERR_PARAM; goto exit; } i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); *data_size = LE16_TO_CPU(desc.datalen); if (status) goto exit; i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count), list_type_opc); exit: return status; } /** * i40e_aq_update_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be written (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @preservation_flags: Preservation mode flags * @cmd_details: pointer to command details structure or NULL * * Update the NVM using the admin queue commands **/ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; enum i40e_status_code status; DEBUGFUNC("i40e_aq_update_nvm"); /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = I40E_ERR_PARAM; goto i40e_aq_update_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); /* If this is the last command in a series, set the proper flag. 
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; if (hw->mac.type == I40E_MAC_X722) { if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) cmd->command_flags |= (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) cmd->command_flags |= (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); } cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); i40e_aq_update_nvm_exit: return status; } /** + * i40e_aq_rearrange_nvm + * @hw: pointer to the hw struct + * @rearrange_nvm: defines direction of rearrangement + * @cmd_details: pointer to command details structure or NULL + * + * Rearrange NVM structure, available only for transition FW + **/ +enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw, + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_nvm_update *cmd; + enum i40e_status_code status; + struct i40e_aq_desc desc; + + DEBUGFUNC("i40e_aq_rearrange_nvm"); + + cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); + + rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | + I40E_AQ_NVM_REARRANGE_TO_STRUCT); + + if (!rearrange_nvm) { + status = I40E_ERR_PARAM; + goto i40e_aq_rearrange_nvm_exit; + } + + cmd->command_flags |= rearrange_nvm; + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +i40e_aq_rearrange_nvm_exit: + return status; +} + +/** * i40e_aq_nvm_progress * @hw: pointer to the hw struct * @progress: pointer to progress returned from AQ * @cmd_details: pointer to command details structure or NULL * * Gets progress of flash rearrangement process **/ enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, struct i40e_asq_cmd_details *cmd_details) { enum i40e_status_code status; struct i40e_aq_desc desc; DEBUGFUNC("i40e_aq_nvm_progress"); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); *progress = desc.params.raw[0]; return status; } /** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct * @bridge_type: type of bridge requested * @mib_type: Local, Remote or both Local and Remote MIBs * @buff: pointer to a user supplied buffer to store the MIB block * @buff_size: size of the buffer (in bytes) * @local_len : length of the returned Local LLDP MIB * @remote_len: length of the returned Remote LLDP MIB * @cmd_details: pointer to command details structure or NULL * * Requests the complete LLDP MIB (entire packet). 
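 *
 * Illustrative caller sketch (not part of the original source); it
 * assumes an initialized struct i40e_hw *hw and uses 0 as a placeholder
 * for the bridge and MIB type constants from i40e_adminq_cmd.h:
 *
 *	u8 mib[1024];
 *	u16 local_len = 0, remote_len = 0;
 *	enum i40e_status_code status;
 *
 *	status = i40e_aq_get_lldp_mib(hw, 0, 0, mib, sizeof(mib),
 *	    &local_len, &remote_len, NULL);
 *	if (status == I40E_SUCCESS)
 *		DEBUGOUT("LLDP MIB read into mib[]\n");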
**/ enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_get_mib *cmd = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; struct i40e_aqc_lldp_get_mib *resp = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); desc.datalen = CPU_TO_LE16(buff_size); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (local_len != NULL) *local_len = LE16_TO_CPU(resp->local_len); if (remote_len != NULL) *remote_len = LE16_TO_CPU(resp->remote_len); } return status; } /** * i40e_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the hw struct * @mib_type: Local, Remote or both Local and Remote MIBs * @buff: pointer to a user supplied buffer to store the MIB block * @buff_size: size of the buffer (in bytes) * @cmd_details: pointer to command details structure or NULL * * Set the LLDP MIB. **/ enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, u8 mib_type, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_set_local_mib *cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_set_local_mib); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->type = mib_type; cmd->length = CPU_TO_LE16(buff_size); - cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff)); + cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buff)); cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff)); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_cfg_lldp_mib_change_event * @hw: pointer to the hw struct * @enable_update: Enable or Disable event posting * @cmd_details: pointer to command details structure or NULL * * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes **/ enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_update_mib *cmd = (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); if (!enable_update) cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** - * i40e_aq_add_lldp_tlv + * i40e_aq_restore_lldp * @hw: pointer to the hw struct - * @bridge_type: type of bridge - * @buff: buffer with TLV to add - * @buff_size: length of the buffer - * @tlv_len: length of the TLV to be added - * @mib_len: length of the 
LLDP MIB returned in response + * @setting: pointer to factory setting variable or NULL + * @restore: True if factory settings should be restored * @cmd_details: pointer to command details structure or NULL * - * Add the specified TLV to LLDP Local MIB for the given bridge type, - * it is responsibility of the caller to make sure that the TLV is not - * already present in the LLDPDU. - * In return firmware will write the complete LLDP MIB with the newly - * added TLV in the response buffer. + * Restore LLDP Agent factory settings if @restore set to True. In other case + * only returns factory setting in AQ response. **/ -enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type, - void *buff, u16 buff_size, u16 tlv_len, - u16 *mib_len, - struct i40e_asq_cmd_details *cmd_details) +enum i40e_status_code +i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; - struct i40e_aqc_lldp_add_tlv *cmd = - (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw; + struct i40e_aqc_lldp_restore *cmd = + (struct i40e_aqc_lldp_restore *)&desc.params.raw; enum i40e_status_code status; - if (buff_size == 0 || !buff || tlv_len == 0) - return I40E_ERR_PARAM; - - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv); - - /* Indirect Command */ - desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); - desc.datalen = CPU_TO_LE16(buff_size); - - cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & - I40E_AQ_LLDP_BRIDGE_TYPE_MASK); - cmd->len = CPU_TO_LE16(tlv_len); - - status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - if (!status) { - if (mib_len != NULL) - *mib_len = LE16_TO_CPU(desc.datalen); + if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Restore LLDP not supported by current FW version.\n"); + return I40E_ERR_DEVICE_NOT_SUPPORTED; } - return status; -} + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); -/** - * i40e_aq_update_lldp_tlv - * @hw: pointer to the hw struct - * @bridge_type: type of bridge - * @buff: buffer with TLV to update - * @buff_size: size of the buffer holding original and updated TLVs - * @old_len: Length of the Original TLV - * @new_len: Length of the Updated TLV - * @offset: offset of the updated TLV in the buff - * @mib_len: length of the returned LLDP MIB - * @cmd_details: pointer to command details structure or NULL - * - * Update the specified TLV to the LLDP Local MIB for the given bridge type. - * Firmware will place the complete LLDP MIB in response buffer with the - * updated TLV. 
- **/ -enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw, - u8 bridge_type, void *buff, u16 buff_size, - u16 old_len, u16 new_len, u16 offset, - u16 *mib_len, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_lldp_update_tlv *cmd = - (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw; - enum i40e_status_code status; + if (restore) + cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; - if (buff_size == 0 || !buff || offset == 0 || - old_len == 0 || new_len == 0) - return I40E_ERR_PARAM; + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv); + if (setting) + *setting = cmd->command & 1; - /* Indirect Command */ - desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); - desc.datalen = CPU_TO_LE16(buff_size); - - cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & - I40E_AQ_LLDP_BRIDGE_TYPE_MASK); - cmd->old_len = CPU_TO_LE16(old_len); - cmd->new_offset = CPU_TO_LE16(offset); - cmd->new_len = CPU_TO_LE16(new_len); - - status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - if (!status) { - if (mib_len != NULL) - *mib_len = LE16_TO_CPU(desc.datalen); - } - return status; } /** - * i40e_aq_delete_lldp_tlv - * @hw: pointer to the hw struct - * @bridge_type: type of bridge - * @buff: pointer to a user supplied buffer that has the TLV - * @buff_size: length of the buffer - * @tlv_len: length of the TLV to be deleted - * @mib_len: length of the returned LLDP MIB - * @cmd_details: pointer to command details structure or NULL - * - * Delete the specified TLV from LLDP Local MIB for the given bridge type. - * The firmware places the entire LLDP MIB in the response buffer. 
- **/ -enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw, - u8 bridge_type, void *buff, u16 buff_size, - u16 tlv_len, u16 *mib_len, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_lldp_add_tlv *cmd = - (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw; - enum i40e_status_code status; - - if (buff_size == 0 || !buff) - return I40E_ERR_PARAM; - - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv); - - /* Indirect Command */ - desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); - desc.datalen = CPU_TO_LE16(buff_size); - cmd->len = CPU_TO_LE16(tlv_len); - cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & - I40E_AQ_LLDP_BRIDGE_TYPE_MASK); - - status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - if (!status) { - if (mib_len != NULL) - *mib_len = LE16_TO_CPU(desc.datalen); - } - - return status; -} - -/** * i40e_aq_stop_lldp * @hw: pointer to the hw struct * @shutdown_agent: True if LLDP Agent needs to be Shutdown + * @persist: True if stop of LLDP should be persistent across power cycles * @cmd_details: pointer to command details structure or NULL * * Stop or Shutdown the embedded LLDP Agent **/ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + bool persist, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop *cmd = (struct i40e_aqc_lldp_stop *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); if (shutdown_agent) cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; + if (persist) { + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) + cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; + else + i40e_debug(hw, I40E_DEBUG_ALL, + "Persistent Stop LLDP not supported by current FW version.\n"); + } + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_start_lldp * @hw: pointer to the hw struct + * @persist: True if start of LLDP should be persistent across power cycles * @cmd_details: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. 
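 *
 * Minimal usage sketch (illustrative only; assumes an initialized hw).
 * A persistent start is requested here; on FW without
 * I40E_HW_FLAG_FW_LLDP_PERSISTENT the function logs a debug message and
 * sends a plain, non-persistent start instead:
 *
 *	enum i40e_status_code status = i40e_aq_start_lldp(hw, TRUE, NULL);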
**/ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, + bool persist, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_start *cmd = (struct i40e_aqc_lldp_start *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); cmd->command = I40E_AQ_LLDP_AGENT_START; + + if (persist) { + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) + cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; + else + i40e_debug(hw, I40E_DEBUG_ALL, + "Persistent Start LLDP not supported by current FW version.\n"); + } + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_dcb_parameters * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * @dcb_enable: True if DCB configuration needs to be applied * **/ enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_dcb_parameters *cmd = (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; enum i40e_status_code status; - if ((hw->mac.type != I40E_MAC_XL710) || - ((hw->aq.api_maj_ver < 1) || - ((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 6)))) + if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) return I40E_ERR_DEVICE_NOT_SUPPORTED; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_dcb_parameters); if (dcb_enable) { cmd->valid_flags = I40E_DCB_VALID; cmd->command = I40E_AQ_DCB_SET_AGENT; } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_cee_dcb_config * @hw: pointer to the hw struct * @buff: response buffer that stores CEE operational configuration * @buff_size: size of the buffer passed * @cmd_details: pointer to command details structure or NULL * * Get CEE DCBX mode operational configuration from firmware **/ enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, cmd_details); return status; } /** * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW * @hw: pointer to the hw struct * @start_agent: True if DCBx Agent needs to be Started * False if DCBx Agent needs to be Stopped * @cmd_details: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent **/ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, bool start_agent, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop_start_specific_agent *cmd = (struct i40e_aqc_lldp_stop_start_specific_agent *) &desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop_start_spec_agent); if (start_agent) cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add in Host byte order * @protocol_index: protocol index type * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL * * Note: Firmware expects the udp_port value to be in 
Little Endian format, * and this function will call CPU_TO_LE16 to convert from Host byte order to * Little Endian order. **/ enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_udp_tunnel *cmd = (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; struct i40e_aqc_del_udp_tunnel_completion *resp = (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); cmd->udp_port = CPU_TO_LE16(udp_port); cmd->protocol_type = protocol_index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && filter_index) *filter_index = resp->index; return status; } /** * i40e_aq_del_udp_tunnel * @hw: pointer to the hw struct * @index: filter index * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_udp_tunnel *cmd = (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); cmd->index = index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_switch_resource_alloc (0x0204) * @hw: pointer to the hw struct * @num_entries: pointer to u8 to store the number of resource entries returned * @buf: pointer to a user supplied buffer. This buffer must be large enough * to store the resource information for all resource types. Each * resource type is a i40e_aqc_switch_resource_alloc_data structure. * @count: size, in bytes, of the buffer provided * @cmd_details: pointer to command details structure or NULL * * Query the resources allocated to a function. **/ enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, u8 *num_entries, struct i40e_aqc_switch_resource_alloc_element_resp *buf, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_switch_resource_alloc *cmd_resp = (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw; enum i40e_status_code status; u16 length = count * sizeof(*buf); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_resource_alloc); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); if (!status && num_entries) *num_entries = cmd_resp->num_entries; return status; } /** * i40e_aq_delete_element - Delete switch element * @hw: pointer to the hw struct * @seid: the SEID to delete from the switch * @cmd_details: pointer to command details structure or NULL * * This deletes a switch element from the switch. 
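 *
 * Illustrative call (not from the original source); seid is a
 * placeholder for a valid, non-zero SEID of the element to remove:
 *
 *	enum i40e_status_code status = i40e_aq_delete_element(hw, seid, NULL);
 *	if (status != I40E_SUCCESS)
 *		DEBUGOUT("Switch element delete failed\n");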
**/ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *cmd = (struct i40e_aqc_switch_seid *)&desc.params.raw; enum i40e_status_code status; if (seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); cmd->seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port * @hw: pointer to the hw struct * @flags: component flags * @mac_seid: uplink seid (MAC SEID) * @vsi_seid: connected vsi seid * @ret_seid: seid of the created pv component * * This instantiates an i40e port virtualizer with specified flags. * Depending on specified flags the port virtualizer can act as a * 802.1Qbr port virtualizer or a 802.1Qbg S-component. */ enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, u16 mac_seid, u16 vsi_seid, u16 *ret_seid) { struct i40e_aq_desc desc; struct i40e_aqc_add_update_pv *cmd = (struct i40e_aqc_add_update_pv *)&desc.params.raw; struct i40e_aqc_add_update_pv_completion *resp = (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv); cmd->command_flags = CPU_TO_LE16(flags); cmd->uplink_seid = CPU_TO_LE16(mac_seid); cmd->connected_seid = CPU_TO_LE16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (!status && ret_seid) *ret_seid = LE16_TO_CPU(resp->pv_seid); return status; } /** * i40e_aq_add_tag - Add an S/E-tag * @hw: pointer to the hw struct * @direct_to_queue: should s-tag direct flow to a specific queue * @vsi_seid: VSI SEID to use this tag * @tag: value of the tag * @queue_num: queue number, only valid if direct_to_queue is TRUE * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This associates an S- or E-tag to a VSI in the switch complex. It returns * the number of tags allocated by the PF, and the number of unallocated * tags available. 
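 *
 * Hypothetical usage sketch (vsi_seid and the tag value are
 * placeholders; queue_num is ignored when direct_to_queue is FALSE):
 *
 *	u16 tags_used = 0, tags_free = 0;
 *	enum i40e_status_code status;
 *
 *	status = i40e_aq_add_tag(hw, FALSE, vsi_seid, 0x123, 0,
 *	    &tags_used, &tags_free, NULL);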
**/ enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, u16 vsi_seid, u16 tag, u16 queue_num, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_tag *cmd = (struct i40e_aqc_add_tag *)&desc.params.raw; struct i40e_aqc_add_remove_tag_completion *resp = (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->tag = CPU_TO_LE16(tag); if (direct_to_queue) { cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE); cmd->queue_number = CPU_TO_LE16(queue_num); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_remove_tag - Remove an S- or E-tag * @hw: pointer to the hw struct * @vsi_seid: VSI SEID this tag is associated with * @tag: value of the S-tag to delete * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This deletes an S- or E-tag from a VSI in the switch complex. It returns * the number of tags allocated by the PF, and the number of unallocated * tags available. **/ enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, u16 tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_tag *cmd = (struct i40e_aqc_remove_tag *)&desc.params.raw; struct i40e_aqc_add_remove_tag_completion *resp = (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->tag = CPU_TO_LE16(tag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_add_mcast_etag - Add a multicast E-tag * @hw: pointer to the hw struct * @pv_seid: Port Virtualizer of this SEID to associate E-tag with * @etag: value of E-tag to add * @num_tags_in_buf: number of unicast E-tags in indirect buffer * @buf: address of indirect buffer * @tags_used: return value, number of E-tags in use by this port * @tags_free: return value, number of unallocated M-tags * @cmd_details: pointer to command details structure or NULL * * This associates a multicast E-tag to a port virtualizer. It will return * the number of tags allocated by the PF, and the number of unallocated * tags available. * * The indirect buffer pointed to by buf is a list of 2-byte E-tags, * num_tags_in_buf long. 
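 *
 * Sketch of the expected buffer layout (illustrative; the E-tag values
 * are placeholders and status/tags_used/tags_free are caller locals):
 *
 *	u16 unicast_etags[2] = { 0x100, 0x101 };
 *
 *	status = i40e_aq_add_mcast_etag(hw, pv_seid, mcast_etag, 2,
 *	    unicast_etags, &tags_used, &tags_free, NULL);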
**/ enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid, u16 etag, u8 num_tags_in_buf, void *buf, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_mcast_etag *cmd = (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; struct i40e_aqc_add_remove_mcast_etag_completion *resp = (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; enum i40e_status_code status; u16 length = sizeof(u16) * num_tags_in_buf; if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0)) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_multicast_etag); cmd->pv_seid = CPU_TO_LE16(pv_seid); cmd->etag = CPU_TO_LE16(etag); cmd->num_unicast_etags = num_tags_in_buf; desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (length > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->mcast_etags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->mcast_etags_free); } return status; } /** * i40e_aq_remove_mcast_etag - Remove a multicast E-tag * @hw: pointer to the hw struct * @pv_seid: Port Virtualizer SEID this M-tag is associated with * @etag: value of the E-tag to remove * @tags_used: return value, number of tags in use by this port * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This deletes an E-tag from the port virtualizer. It will return * the number of tags allocated by the port, and the number of unallocated * tags available. **/ enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid, u16 etag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_mcast_etag *cmd = (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; struct i40e_aqc_add_remove_mcast_etag_completion *resp = (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; enum i40e_status_code status; if (pv_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_multicast_etag); cmd->pv_seid = CPU_TO_LE16(pv_seid); cmd->etag = CPU_TO_LE16(etag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->mcast_etags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->mcast_etags_free); } return status; } /** * i40e_aq_update_tag - Update an S/E-tag * @hw: pointer to the hw struct * @vsi_seid: VSI SEID using this S-tag * @old_tag: old tag value * @new_tag: new tag value * @tags_used: return value, number of tags in use by this PF * @tags_free: return value, number of unallocated tags * @cmd_details: pointer to command details structure or NULL * * This updates the value of the tag currently attached to this VSI * in the switch complex. It will return the number of tags allocated * by the PF, and the number of unallocated tags available. 
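 *
 * Illustrative call (placeholder values; replaces tag 0x123 with 0x456
 * on the VSI identified by vsi_seid):
 *
 *	status = i40e_aq_update_tag(hw, vsi_seid, 0x123, 0x456,
 *	    &tags_used, &tags_free, NULL);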
**/ enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, u16 old_tag, u16 new_tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_update_tag *cmd = (struct i40e_aqc_update_tag *)&desc.params.raw; struct i40e_aqc_update_tag_completion *resp = (struct i40e_aqc_update_tag_completion *)&desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag); cmd->seid = CPU_TO_LE16(vsi_seid); cmd->old_tag = CPU_TO_LE16(old_tag); cmd->new_tag = CPU_TO_LE16(new_tag); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tags_used != NULL) *tags_used = LE16_TO_CPU(resp->tags_used); if (tags_free != NULL) *tags_free = LE16_TO_CPU(resp->tags_free); } return status; } /** * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs * @hw: pointer to the hw struct * @tcmap: TC map for request/release any ignore PFC condition * @request: request or release ignore PFC condition * @tcmap_ret: return TCs for which PFC is currently ignored * @cmd_details: pointer to command details structure or NULL * * This sends out request/release to ignore PFC condition for a TC. * It will return the TCs for which PFC is currently ignored. **/ enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_pfc_ignore *cmd_resp = (struct i40e_aqc_pfc_ignore *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc); if (request) cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET; cmd_resp->tc_bitmap = tcmap; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (tcmap_ret != NULL) *tcmap_ret = cmd_resp->tc_bitmap; } return status; } /** * i40e_aq_dcb_updated - DCB Updated Command * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * When LLDP is handled in PF this command is used by the PF * to notify EMP that a DCB setting is modified. * When LLDP is handled in EMP this command is used by the PF * to notify EMP whenever one of the following parameters gets * modified: * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA * - PCIRTT in PRTDCB_GENC.PCIRTT * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME. * EMP will return when the shared RPB settings have been * recomputed and modified. The retval field in the descriptor * will be set to 0 when RPB is modified. **/ enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch. * @hw: pointer to the hw struct * @seid: defines the SEID of the switch for which the stats are requested * @vlan_id: the VLAN ID for which the statistics are requested * @stat_index: index of the statistics counters block assigned to this VLAN * @cmd_details: pointer to command details structure or NULL * * XL710 supports 128 smonVlanStats counters. This command is used to * allocate a set of smonVlanStats counters to a specific VLAN in a specific * switch. 
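 *
 * Illustrative allocation sketch (not from the original source):
 *
 *	u16 stat_index = 0;
 *
 *	status = i40e_aq_add_statistics(hw, switch_seid, vlan_id,
 *	    &stat_index, NULL);
 *
 * On success stat_index identifies the allocated counter block; the
 * same index is later passed to i40e_aq_remove_statistics() to free it.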
**/ enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 *stat_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_statistics *cmd_resp = (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; enum i40e_status_code status; if ((seid == 0) || (stat_index == NULL)) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics); cmd_resp->seid = CPU_TO_LE16(seid); cmd_resp->vlan = CPU_TO_LE16(vlan_id); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stat_index) *stat_index = LE16_TO_CPU(cmd_resp->stat_index); return status; } /** * i40e_aq_remove_statistics - Remove a statistics block from a VLAN in a switch. * @hw: pointer to the hw struct * @seid: defines the SEID of the switch for which the stats are requested * @vlan_id: the VLAN ID for which the statistics are requested * @stat_index: index of the statistics counters block assigned to this VLAN * @cmd_details: pointer to command details structure or NULL * * XL710 supports 128 smonVlanStats counters. This command is used to * deallocate a set of smonVlanStats counters from a specific VLAN in a specific * switch. **/ enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 stat_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_statistics *cmd = (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; enum i40e_status_code status; if (seid == 0) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_statistics); cmd->seid = CPU_TO_LE16(seid); cmd->vlan = CPU_TO_LE16(vlan_id); cmd->stat_index = CPU_TO_LE16(stat_index); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_port_parameters - set physical port parameters. 
* @hw: pointer to the hw struct * @bad_frame_vsi: defines the VSI to which bad frames are forwarded * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded * @double_vlan: if set double VLAN is enabled * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_set_port_parameters *cmd; enum i40e_status_code status; struct i40e_aq_desc desc; u16 command_flags = 0; cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_port_parameters); cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); if (save_bad_pac) command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS; if (pad_short_pac) command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS; if (double_vlan) command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA; cmd->command_flags = CPU_TO_LE16(command_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler * @hw: pointer to the hw struct * @seid: seid for the physical port/switching component/vsi * @buff: Indirect buffer to hold data parameters and response * @buff_size: Indirect buffer size * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL * * Generic command handler for Tx scheduler AQ commands **/ static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, void *buff, u16 buff_size, enum i40e_admin_queue_opc opcode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_tx_sched_ind *cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; enum i40e_status_code status; bool cmd_param_flag = FALSE; switch (opcode) { case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: case i40e_aqc_opc_configure_vsi_tc_bw: case i40e_aqc_opc_enable_switching_comp_ets: case i40e_aqc_opc_modify_switching_comp_ets: case i40e_aqc_opc_disable_switching_comp_ets: case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: case i40e_aqc_opc_configure_switching_comp_bw_config: cmd_param_flag = TRUE; break; case i40e_aqc_opc_query_vsi_bw_config: case i40e_aqc_opc_query_vsi_ets_sla_config: case i40e_aqc_opc_query_switching_comp_ets_config: case i40e_aqc_opc_query_port_ets_config: case i40e_aqc_opc_query_switching_comp_bw_config: cmd_param_flag = FALSE; break; default: return I40E_ERR_PARAM; } i40e_fill_default_direct_cmd_desc(&desc, opcode); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (cmd_param_flag) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(buff_size); cmd->vsi_seid = CPU_TO_LE16(seid); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit * @hw: pointer to the hw struct * @seid: VSI seid * @credit: BW limit credits (0 = disabled) * @max_credit: Max BW limit credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_credit, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct 
i40e_aqc_configure_vsi_bw_limit *cmd = (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_vsi_bw_limit); cmd->vsi_seid = CPU_TO_LE16(seid); cmd->credit = CPU_TO_LE16(credit); cmd->max_credit = max_credit; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit * @hw: pointer to the hw struct * @seid: switching component seid * @credit: BW limit credits (0 = disabled) * @max_bw: Max BW limit credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_bw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_configure_switching_comp_bw_limit *cmd = (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_switching_comp_bw_limit); cmd->seid = CPU_TO_LE16(seid); cmd->credit = CPU_TO_LE16(credit); cmd->max_bw = max_bw; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC * @hw: pointer to the hw struct * @seid: VSI seid * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_ets_sla_bw_limit, cmd_details); } /** * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC * @hw: pointer to the hw struct * @seid: VSI seid * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_tc_bw, cmd_details); } /** * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_switching_comp_ets_bw_limit, cmd_details); } /** * i40e_aq_query_vsi_bw_config - Query VSI BW configuration * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_bw_config, cmd_details); 
} /** * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration per TC * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_ets_sla_config, cmd_details); } /** * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's per TC BW config * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_ets_config, cmd_details); } /** * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration * @hw: pointer to the hw struct * @seid: seid of the VSI or switching component connected to Physical Port * @bw_data: Buffer to hold current ETS configuration for the Physical Port * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_port_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_port_ets_config, cmd_details); } /** * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's BW configuration * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_bw_config, cmd_details); } /** * i40e_validate_filter_settings * @hw: pointer to the hardware structure * @settings: Filter control settings * * Check and validate the filter control settings passed. * The function checks for the valid filter/context sizes being * passed for FCoE and PE. * * Returns I40E_SUCCESS if the values passed are valid and within * range else returns an error. 
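 *
 * The size enums encode power-of-two multiples of a base size, so the
 * checks below reduce to shifts of the form:
 *
 *	fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE << settings->fcoe_filt_num;
 *
 * after which the FCoE settings are rejected whenever
 * fcoe_filt_size + fcoe_cntx_size exceeds the PMFCOEFMAX field read
 * from the I40E_GLHMC_FCOEFMAX register.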
**/ static enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { u32 fcoe_cntx_size, fcoe_filt_size; u32 pe_cntx_size, pe_filt_size; u32 fcoe_fmax; u32 val; /* Validate FCoE settings passed */ switch (settings->fcoe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; fcoe_filt_size <<= (u32)settings->fcoe_filt_num; break; default: return I40E_ERR_PARAM; } switch (settings->fcoe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; break; default: return I40E_ERR_PARAM; } /* Validate PE settings passed */ switch (settings->pe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: case I40E_HASH_FILTER_SIZE_64K: case I40E_HASH_FILTER_SIZE_128K: case I40E_HASH_FILTER_SIZE_256K: case I40E_HASH_FILTER_SIZE_512K: case I40E_HASH_FILTER_SIZE_1M: pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; pe_filt_size <<= (u32)settings->pe_filt_num; break; default: return I40E_ERR_PARAM; } switch (settings->pe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: case I40E_DMA_CNTX_SIZE_8K: case I40E_DMA_CNTX_SIZE_16K: case I40E_DMA_CNTX_SIZE_32K: case I40E_DMA_CNTX_SIZE_64K: case I40E_DMA_CNTX_SIZE_128K: case I40E_DMA_CNTX_SIZE_256K: pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; pe_cntx_size <<= (u32)settings->pe_cntx_num; break; default: return I40E_ERR_PARAM; } /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ val = rd32(hw, I40E_GLHMC_FCOEFMAX); fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) return I40E_ERR_INVALID_SIZE; return I40E_SUCCESS; } /** * i40e_set_filter_control * @hw: pointer to the hardware structure * @settings: Filter control settings * * Set the Queue Filters for PE/FCoE and enable filters required * for a single PF. It is expected that these settings are programmed * at the driver initialization time. 
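 *
 * Minimal init-time sketch (illustrative; the values are placeholders
 * drawn from the enums accepted by i40e_validate_filter_settings()):
 *
 *	struct i40e_filter_control_settings settings = {0};
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = TRUE;
 *	settings.enable_ethtype = TRUE;
 *	settings.enable_macvlan = TRUE;
 *	if (i40e_set_filter_control(hw, &settings) != I40E_SUCCESS)
 *		DEBUGOUT("i40e_set_filter_control failed\n");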
**/ enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { enum i40e_status_code ret = I40E_SUCCESS; u32 hash_lut_size = 0; u32 val; if (!settings) return I40E_ERR_PARAM; /* Validate the input settings */ ret = i40e_validate_filter_settings(hw, settings); if (ret) return ret; /* Read the PF Queue Filter control register */ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); /* Program required PE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & I40E_PFQF_CTL_0_PEHSIZE_MASK; /* Program required PE contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & I40E_PFQF_CTL_0_PEDSIZE_MASK; /* Program required FCoE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; val |= ((u32)settings->fcoe_filt_num << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCHSIZE_MASK; /* Program required FCoE DDP contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; val |= ((u32)settings->fcoe_cntx_num << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCDSIZE_MASK; /* Program Hash LUT size for the PF */ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) hash_lut_size = 1; val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ if (settings->enable_fdir) val |= I40E_PFQF_CTL_0_FD_ENA_MASK; if (settings->enable_ethtype) val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; if (settings->enable_macvlan) val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); return I40E_SUCCESS; } /** * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter * @hw: pointer to the hw struct * @mac_addr: MAC address to use in the filter * @ethtype: Ethertype to use in the filter * @flags: Flags that needs to be applied to the filter * @vsi_seid: seid of the control VSI * @queue: VSI queue number to send the packet to * @is_add: Add control packet filter if True else remove * @stats: Structure to hold information on control filter counts * @cmd_details: pointer to command details structure or NULL * * This command will Add or Remove control packet filter for a control VSI. * In return it will update the total number of perfect filter count in * the stats member. 
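 *
 * For an in-tree caller see i40e_add_filter_to_drop_tx_flow_control_frames()
 * below, which uses the add path to install a Tx drop filter for
 * ethertype 0x8808 (Ethernet flow control) frames.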
**/ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u8 *mac_addr, u16 ethtype, u16 flags, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_control_packet_filter *cmd = (struct i40e_aqc_add_remove_control_packet_filter *) &desc.params.raw; struct i40e_aqc_add_remove_control_packet_filter_completion *resp = (struct i40e_aqc_add_remove_control_packet_filter_completion *) &desc.params.raw; enum i40e_status_code status; if (vsi_seid == 0) return I40E_ERR_PARAM; if (is_add) { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_control_packet_filter); cmd->queue = CPU_TO_LE16(queue); } else { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_control_packet_filter); } if (mac_addr) i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN, I40E_NONDMA_TO_NONDMA); cmd->etype = CPU_TO_LE16(ethtype); cmd->flags = CPU_TO_LE16(flags); cmd->seid = CPU_TO_LE16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stats) { stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used); stats->etype_used = LE16_TO_CPU(resp->etype_used); stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free); stats->etype_free = LE16_TO_CPU(resp->etype_free); } return status; } /** * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control * @hw: pointer to the hw struct * @seid: VSI seid to add ethertype filter from **/ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 seid) { #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; enum i40e_status_code status; status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, seid, 0, TRUE, NULL, NULL); if (status) DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); } /** * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue * @filters: list of cloud filters * @filter_count: length of list * * There's an issue in the device where the Geneve VNI layout needs * to be shifted 1 byte over from the VxLAN VNI **/ static void i40e_fix_up_geneve_vni( struct i40e_aqc_add_remove_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters; int i; for (i = 0; i < filter_count; i++) { u16 tnl_type; u32 ti; tnl_type = (LE16_TO_CPU(f[i].flags) & I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { ti = LE32_TO_CPU(f[i].tenant_id); f[i].tenant_id = CPU_TO_LE32(ti << 8); } } } /** * i40e_aq_add_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from * @filters: Buffer which contains the filters to be added * @filter_count: number of filters contained in the buffer * * Set the cloud filters for a given VSI. The contents of the * i40e_aqc_add_remove_cloud_filters_element_data are filled * in by the caller of the function.
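 * * A single-element sketch (the inner MAC, vni and flag composition are placeholders, not values taken from this change): * *	struct i40e_aqc_add_remove_cloud_filters_element_data e = {0}; * *	i40e_memcpy(e.inner_mac, mac, ETH_ALEN, I40E_NONDMA_TO_NONDMA); *	e.flags = CPU_TO_LE16(I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID | *	    (I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE << *	     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); *	e.tenant_id = CPU_TO_LE32(vni); *	i40e_aq_add_cloud_filters(hw, vsi_seid, &e, 1); * * Note that for Geneve elements the VNI is passed as-is; the byte shift for the HW quirk is applied internally by i40e_fix_up_geneve_vni().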
* **/ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; enum i40e_status_code status; u16 buff_len; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); i40e_fix_up_geneve_vni(filters, filter_count); status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_remove_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to remove cloud filters from * @filters: Buffer which contains the filters to be removed * @filter_count: number of filters contained in the buffer * * Remove the cloud filters for a given VSI. The contents of the * i40e_aqc_add_remove_cloud_filters_element_data are filled * in by the caller of the function. * **/ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_remove_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; enum i40e_status_code status; u16 buff_len; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); i40e_fix_up_geneve_vni(filters, filter_count); status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_alternate_write * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be written * @reg_val0: value to be written under 'reg_addr0' * @reg_addr1: address of second dword to be written * @reg_val1: value to be written under 'reg_addr1' * * Write one or two dwords to alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. * **/ enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write); cmd_resp->address0 = CPU_TO_LE32(reg_addr0); cmd_resp->address1 = CPU_TO_LE32(reg_addr1); cmd_resp->data0 = CPU_TO_LE32(reg_val0); cmd_resp->data1 = CPU_TO_LE32(reg_val1); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_alternate_write_indirect * @hw: pointer to the hardware structure * @addr: address of a first register to be modified * @dw_count: number of alternate structure fields to write * @buffer: pointer to the command buffer * * Write 'dw_count' dwords from 'buffer' to alternate structure * starting at 'addr'.
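 * * For instance, writing two consecutive dwords (addr and the values are placeholders): * *	u32 buf[2] = { 0x1, 0x2 }; * *	i40e_aq_alternate_write_indirect(hw, addr, 2, buf);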
* **/ enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_ind_write *cmd_resp = (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; enum i40e_status_code status; if (buffer == NULL) return I40E_ERR_PARAM; /* Indirect command */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write_indirect); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (dw_count > (I40E_AQ_LARGE_BUF/4)) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd_resp->address = CPU_TO_LE32(addr); cmd_resp->length = CPU_TO_LE32(dw_count); status = i40e_asq_send_command(hw, &desc, buffer, I40E_LO_DWORD(4*dw_count), NULL); return status; } /** * i40e_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read * @reg_val0: pointer for data read from 'reg_addr0' * @reg_addr1: address of second dword to be read * @reg_val1: pointer for data read from 'reg_addr1' * * Read one or two dwords from alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer * is not passed then only register at 'reg_addr0' is read. * **/ enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; enum i40e_status_code status; if (reg_val0 == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); cmd_resp->address0 = CPU_TO_LE32(reg_addr0); cmd_resp->address1 = CPU_TO_LE32(reg_addr1); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (status == I40E_SUCCESS) { *reg_val0 = LE32_TO_CPU(cmd_resp->data0); if (reg_val1 != NULL) *reg_val1 = LE32_TO_CPU(cmd_resp->data1); } return status; } /** * i40e_aq_alternate_read_indirect * @hw: pointer to the hardware structure * @addr: address of the alternate structure field * @dw_count: number of alternate structure fields to read * @buffer: pointer to the command buffer * * Read 'dw_count' dwords from alternate structure starting at 'addr' and * place them in 'buffer'. The buffer should be allocated by caller. * **/ enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_ind_write *cmd_resp = (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; enum i40e_status_code status; if (buffer == NULL) return I40E_ERR_PARAM; /* Indirect command */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read_indirect); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (dw_count > (I40E_AQ_LARGE_BUF/4)) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd_resp->address = CPU_TO_LE32(addr); cmd_resp->length = CPU_TO_LE32(dw_count); status = i40e_asq_send_command(hw, &desc, buffer, I40E_LO_DWORD(4*dw_count), NULL); return status; } /** * i40e_aq_alternate_clear * @hw: pointer to the HW structure. * * Clear the alternate structures of the port from which the function * is called. 
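 * * An illustrative sequence (the bios_mode of 0 is a placeholder) pairs the clear with a write-done notification so the FW learns that the structures changed: * *	bool reset_needed = FALSE; * *	i40e_aq_alternate_clear(hw); *	i40e_aq_alternate_write_done(hw, 0, &reset_needed);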
* **/ enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_clear_port); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_alternate_write_done * @hw: pointer to the HW structure. * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS * @reset_needed: indicates the SW should trigger GLOBAL reset * * Indicates to the FW that alternate structures have been changed. * **/ enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, u8 bios_mode, bool *reset_needed) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write_done *cmd = (struct i40e_aqc_alternate_write_done *)&desc.params.raw; enum i40e_status_code status; if (reset_needed == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write_done); cmd->cmd_flags = CPU_TO_LE16(bios_mode); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (!status && reset_needed) *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) & I40E_AQ_ALTERNATE_RESET_NEEDED) != 0); return status; } /** * i40e_aq_set_oem_mode * @hw: pointer to the HW structure. * @oem_mode: the OEM mode to be used * * Sets the device to a specific operating mode. Currently the only supported * mode is no_clp, which causes FW to refrain from using Alternate RAM. * **/ enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, u8 oem_mode) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write_done *cmd = (struct i40e_aqc_alternate_write_done *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_set_mode); cmd->cmd_flags = CPU_TO_LE16(oem_mode); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_resume_port_tx * @hw: pointer to the hardware structure * @cmd_details: pointer to command details structure or NULL * * Resume port's Tx traffic **/ enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_set_pci_config_data - store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status word from PCI config space * * Stores the PCI bus info (speed, width, type) within the i40e_hw structure **/ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) { hw->bus.type = i40e_bus_type_pci_express; switch (link_status & I40E_PCI_LINK_WIDTH) { case I40E_PCI_LINK_WIDTH_1: hw->bus.width = i40e_bus_width_pcie_x1; break; case I40E_PCI_LINK_WIDTH_2: hw->bus.width = i40e_bus_width_pcie_x2; break; case I40E_PCI_LINK_WIDTH_4: hw->bus.width = i40e_bus_width_pcie_x4; break; case I40E_PCI_LINK_WIDTH_8: hw->bus.width = i40e_bus_width_pcie_x8; break; default: hw->bus.width = i40e_bus_width_unknown; break; } switch (link_status & I40E_PCI_LINK_SPEED) { case I40E_PCI_LINK_SPEED_2500: hw->bus.speed = i40e_bus_speed_2500; break; case I40E_PCI_LINK_SPEED_5000: hw->bus.speed = i40e_bus_speed_5000; break; case I40E_PCI_LINK_SPEED_8000: hw->bus.speed = i40e_bus_speed_8000; break; default: hw->bus.speed = i40e_bus_speed_unknown; break; } } /** * i40e_aq_debug_dump * @hw: pointer to the hardware structure * @cluster_id: specific cluster to dump * @table_id: table 
id within cluster * @start_index: index of line in the block to read * @buff_size: dump buffer size * @buff: dump buffer * @ret_buff_size: actual buffer size returned * @ret_next_table: next block to read * @ret_next_index: next index to read * @cmd_details: pointer to command details structure or NULL * * Dump internal FW/HW data for debug purposes. * **/ enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, u8 table_id, u32 start_index, u16 buff_size, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_dump_internals *cmd = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; struct i40e_aqc_debug_dump_internals *resp = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; enum i40e_status_code status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_dump_internals); /* Indirect Command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); cmd->cluster_id = cluster_id; cmd->table_id = table_id; cmd->idx = CPU_TO_LE32(start_index); desc.datalen = CPU_TO_LE16(buff_size); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (ret_buff_size != NULL) *ret_buff_size = LE16_TO_CPU(desc.datalen); if (ret_next_table != NULL) *ret_next_table = resp->table_id; if (ret_next_index != NULL) *ret_next_index = LE32_TO_CPU(resp->idx); } return status; } + /** + * i40e_enable_eee + * @hw: pointer to the hardware structure + * @enable: state of Energy Efficient Ethernet mode to be set + * + * Enables or disables Energy Efficient Ethernet (EEE) mode + * according to the @enable parameter. + **/ +enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + enum i40e_status_code status; + __le16 eee_capability; + + /* Get initial PHY capabilities */ + status = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities, + NULL); + if (status) + goto err; + + /* Check whether NIC configuration is compatible with Energy Efficient + * Ethernet (EEE) mode.
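+ * A zero eee_capability mask means the PHY does not advertise EEE + * at any speed, so the mode cannot be enabled.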
+ */ + if (abilities.eee_capability == 0) { + status = I40E_ERR_CONFIG; + goto err; + } + + /* Cache initial EEE capability */ + eee_capability = abilities.eee_capability; + + /* Get current configuration */ + status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities, + NULL); + if (status) + goto err; + + /* Cache current configuration */ + config.phy_type = abilities.phy_type; + config.link_speed = abilities.link_speed; + config.abilities = abilities.abilities | + I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + /* Set desired EEE state */ + if (enable) { + config.eee_capability = eee_capability; + config.eeer |= I40E_PRTPM_EEER_TX_LPI_EN_MASK; + } else { + config.eee_capability = 0; + config.eeer &= ~I40E_PRTPM_EEER_TX_LPI_EN_MASK; + } + + /* Save modified config */ + status = i40e_aq_set_phy_config(hw, &config, NULL); +err: + return status; +} + +/** * i40e_read_bw_from_alt_ram * @hw: pointer to the hardware structure * @max_bw: pointer for max_bw read * @min_bw: pointer for min_bw read * @min_valid: pointer for bool that is TRUE if min_bw is a valid value * @max_valid: pointer for bool that is TRUE if max_bw is a valid value * * Read bw from the alternate ram for the given pf **/ enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid) { enum i40e_status_code status; u32 max_bw_addr, min_bw_addr; /* Calculate the address of the min/max bw registers */ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + I40E_ALT_STRUCT_MAX_BW_OFFSET + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + I40E_ALT_STRUCT_MIN_BW_OFFSET + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); /* Read the bandwidths from alt ram */ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, min_bw_addr, min_bw); if (*min_bw & I40E_ALT_BW_VALID_MASK) *min_valid = TRUE; else *min_valid = FALSE; if (*max_bw & I40E_ALT_BW_VALID_MASK) *max_valid = TRUE; else *max_valid = FALSE; return status; } /** * i40e_aq_configure_partition_bw * @hw: pointer to the hardware structure * @bw_data: Buffer holding valid pfs and bw limits * @cmd_details: pointer to command details * * Configure partitions guaranteed/max bw **/ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { enum i40e_status_code status; struct i40e_aq_desc desc; u16 bwd_size = sizeof(*bw_data); i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_partition_bw); /* Indirect command */ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); desc.datalen = CPU_TO_LE16(bwd_size); status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details); return status; } /** * i40e_read_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 *value) { enum i40e_status_code status = I40E_ERR_TIMEOUT; u8 port_num = (u8)hw->func_caps.mdio_port_num; u32 command = 0; u16 retry = 1000; command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 
(I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | (I40E_MDIO_CLAUSE22_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); } else { command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; } return status; } /** * i40e_write_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes specified PHY register value **/ enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 value) { enum i40e_status_code status = I40E_ERR_TIMEOUT; u8 port_num = (u8)hw->func_caps.mdio_port_num; u32 command = 0; u16 retry = 1000; command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; wr32(hw, I40E_GLGEN_MSRWD(port_num), command); command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | (I40E_MDIO_CLAUSE22_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); return status; } /** * i40e_read_phy_register_clause45 * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value) { enum i40e_status_code status = I40E_ERR_TIMEOUT; u32 command = 0; u16 retry = 1000; u8 port_num = (u8)hw->func_caps.mdio_port_num; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); goto phy_read_end; } command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); status = I40E_ERR_TIMEOUT; retry = 1000; wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); if (!status) { command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; } else { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't read register value from external PHY.\n"); } phy_read_end: return status; } /** * i40e_write_phy_register_clause45 
* @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register **/ enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value) { enum i40e_status_code status = I40E_ERR_TIMEOUT; u32 command = 0; u16 retry = 1000; u8 port_num = (u8)hw->func_caps.mdio_port_num; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); goto phy_write_end; } command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; wr32(hw, I40E_GLGEN_MSRWD(port_num), command); command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); status = I40E_ERR_TIMEOUT; retry = 1000; wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = I40E_SUCCESS; break; } i40e_usec_delay(10); retry--; } while (retry); phy_write_end: return status; } /** * i40e_write_phy_register * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register **/ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value) { enum i40e_status_code status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: status = i40e_write_phy_register_clause22(hw, reg, phy_addr, value); break; case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: status = i40e_write_phy_register_clause45(hw, page, reg, phy_addr, value); break; default: status = I40E_ERR_UNKNOWN_PHY; break; } return status; } /** * i40e_read_phy_register * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value) { enum i40e_status_code status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: status = i40e_read_phy_register_clause22(hw, reg, phy_addr, value); break; case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: status = i40e_read_phy_register_clause45(hw, page, reg, phy_addr, value); break; default: status = I40E_ERR_UNKNOWN_PHY; break; } return status; } /** * i40e_get_phy_address * @hw: pointer to the HW structure * @dev_num: PHY port number whose address we want * * Gets PHY
address for current port **/ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) { u8 port_num = (u8)hw->func_caps.mdio_port_num; u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; } /** * i40e_blink_phy_link_led * @hw: pointer to the HW structure * @time: how long the LED will blink, in seconds * @interval: gap between LED on and off, in milliseconds * * Blinks PHY link LED **/ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval) { enum i40e_status_code status = I40E_SUCCESS; u32 i; u16 led_ctl = 0; u16 gpio_led_port; u16 led_reg; u16 led_addr = I40E_PHY_LED_PROV_REG_1; u8 phy_addr = 0; u8 port_num; i = rd32(hw, I40E_PFGEN_PORTNUM); port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); phy_addr = i40e_get_phy_address(hw, port_num); for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, led_addr++) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, &led_reg); if (status) goto phy_blinking_end; led_ctl = led_reg; if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { led_reg = 0; status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_reg); if (status) goto phy_blinking_end; break; } } if (time > 0 && interval > 0) { for (i = 0; i < time * 1000; i += interval) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, &led_reg); if (status) goto restore_config; if (led_reg & I40E_PHY_LED_MANUAL_ON) led_reg = 0; else led_reg = I40E_PHY_LED_MANUAL_ON; status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_reg); if (status) goto restore_config; i40e_msec_delay(interval); } } restore_config: status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_ctl); phy_blinking_end: return status; } /** * i40e_led_get_reg - read LED register * @hw: pointer to the HW structure * @led_addr: LED register address * @reg_val: read register value **/ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, u32 *reg_val) { enum i40e_status_code status; u8 phy_addr = 0; *reg_val = 0; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, + I40E_PHY_COM_REG_PAGE, TRUE, I40E_PHY_LED_PROV_REG_1, reg_val, NULL); } else { phy_addr = i40e_get_phy_address(hw, hw->port); status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, (u16 *)reg_val); } return status; } /** * i40e_led_set_reg - write LED register * @hw: pointer to the HW structure * @led_addr: LED register address * @reg_val: register value to write **/ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, u32 reg_val) { enum i40e_status_code status; u8 phy_addr = 0; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, + I40E_PHY_COM_REG_PAGE, TRUE, I40E_PHY_LED_PROV_REG_1, reg_val, NULL); } else { phy_addr = i40e_get_phy_address(hw, hw->port); status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, (u16)reg_val); } return status; } /** * i40e_led_get_phy - return current on/off mode * @hw: pointer to the hw struct * @led_addr: address of led register to use * @val: original value of register to use * **/ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, u16 *val) { enum
i40e_status_code status = I40E_SUCCESS; u16 gpio_led_port; u32 reg_val_aq; u16 temp_addr; u8 phy_addr = 0; u16 reg_val; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, + I40E_PHY_COM_REG_PAGE, TRUE, I40E_PHY_LED_PROV_REG_1, ®_val_aq, NULL); if (status == I40E_SUCCESS) *val = (u16)reg_val_aq; return status; } temp_addr = I40E_PHY_LED_PROV_REG_1; phy_addr = i40e_get_phy_address(hw, hw->port); for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, temp_addr++) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, temp_addr, phy_addr, ®_val); if (status) return status; *val = reg_val; if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { *led_addr = temp_addr; break; } } return status; } /** * i40e_led_set_phy * @hw: pointer to the HW structure * @on: TRUE or FALSE * @led_addr: address of led register to use * @mode: original val plus bit for set or ignore * * Set led's on or off when controlled by the PHY * **/ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on, u16 led_addr, u32 mode) { enum i40e_status_code status = I40E_SUCCESS; u32 led_ctl = 0; u32 led_reg = 0; status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) return status; led_ctl = led_reg; if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { led_reg = 0; status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) return status; } status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) goto restore_config; if (on) led_reg = I40E_PHY_LED_MANUAL_ON; else led_reg = 0; status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) goto restore_config; if (mode & I40E_PHY_LED_MODE_ORIG) { led_ctl = (mode & I40E_PHY_LED_MODE_MASK); status = i40e_led_set_reg(hw, led_addr, led_ctl); } return status; restore_config: status = i40e_led_set_reg(hw, led_addr, led_ctl); return status; } /** + * i40e_get_phy_lpi_status - read LPI status from PHY or MAC register + * @hw: pointer to the hw struct + * @stat: pointer to structure with status of rx and tx lpi + * + * Read LPI state directly from external PHY register or from MAC + * register, depending on device ID and current link speed. 
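+ * + * A sketch of a stats-collection caller (ps and report_lpi() are + * illustrative placeholders, not part of this change): + * + *	struct i40e_hw_port_stats ps = {0}; + * + *	if (i40e_get_phy_lpi_status(hw, &ps) == I40E_SUCCESS) + *		report_lpi(ps.rx_lpi_status, ps.tx_lpi_status);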
+ */ +enum i40e_status_code i40e_get_phy_lpi_status(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat) +{ + enum i40e_status_code ret = I40E_SUCCESS; + u32 val; + + stat->rx_lpi_status = 0; + stat->tx_lpi_status = 0; + + if (hw->device_id == I40E_DEV_ID_10G_BASE_T_BC && + (hw->phy.link_info.link_speed == I40E_LINK_SPEED_2_5GB || + hw->phy.link_info.link_speed == I40E_LINK_SPEED_5GB)) { + ret = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_BCM_PHY_PCS_STATUS1_PAGE, + TRUE, + I40E_BCM_PHY_PCS_STATUS1_REG, + &val, NULL); + + if (ret != I40E_SUCCESS) + return ret; + + stat->rx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_RX_LPI); + stat->tx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_TX_LPI); + + return ret; + } + + val = rd32(hw, I40E_PRTPM_EEE_STAT); + stat->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + stat->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + + return ret; +} + +/** + * i40e_get_lpi_counters - read LPI counters from EEE statistics + * @hw: pointer to the hw struct + * @tx_counter: pointer to memory for TX LPI counter + * @rx_counter: pointer to memory for RX LPI counter + * + * Read Low Power Idle (LPI) mode counters from Energy Efficient + * Ethernet (EEE) statistics. + **/ +enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw, + u32 *tx_counter, u32 *rx_counter) +{ + enum i40e_status_code retval; + u32 cmd_status; + + retval = i40e_aq_run_phy_activity(hw, + I40E_AQ_RUN_PHY_ACTIVITY_ACTIVITY_ID_USER_DEFINED, + I40E_AQ_RUN_PHY_ACTIVITY_DNL_OPCODE_GET_EEE_STATISTICS, + &cmd_status, tx_counter, rx_counter, NULL); + + if (cmd_status != I40E_AQ_RUN_PHY_ACTIVITY_CMD_STATUS_SUCCESS) + retval = I40E_ERR_ADMIN_QUEUE_ERROR; + + return retval; +} + +/** + * i40e_lpi_stat_update - update LPI counters with values relative to offset + * @hw: pointer to the hw struct + * @offset_loaded: if FALSE, the current counter values are stored as the new offsets + * @tx_offset: pointer to offset of TX LPI counter + * @tx_stat: pointer to value of TX LPI counter + * @rx_offset: pointer to offset of RX LPI counter + * @rx_stat: pointer to value of RX LPI counter + * + * Update Low Power Idle (LPI) mode counters, taking the passed + * offsets into account.
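+ * + * The HW counters are 32-bit, so the arithmetic below assumes at most + * one rollover between reads: e.g. an offset of 0xFFFFFFF0 and a new + * reading of 0x10 yield a delta of 0x20, not a negative value.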
+ **/ +enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw, + bool offset_loaded, u64 *tx_offset, + u64 *tx_stat, u64 *rx_offset, + u64 *rx_stat) +{ + enum i40e_status_code retval; + u32 tx_counter, rx_counter; + + retval = i40e_get_lpi_counters(hw, &tx_counter, &rx_counter); + if (retval) + goto err; + + if (!offset_loaded) { + *tx_offset = tx_counter; + *rx_offset = rx_counter; + } + + if (tx_counter >= *tx_offset) + *tx_stat = (u32)(tx_counter - *tx_offset); + else + *tx_stat = (u32)((tx_counter + BIT_ULL(32)) - *tx_offset); + + if (rx_counter >= *rx_offset) + *rx_stat = (u32)(rx_counter - *rx_offset); + else + *rx_stat = (u32)((rx_counter + BIT_ULL(32)) - *rx_offset); +err: + return retval; +} + +/** * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: ptr to register value * @cmd_details: pointer to command details structure or NULL * * Use the firmware to read the Rx control register, * especially useful if the Rx unit is under heavy pressure **/ enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; enum i40e_status_code status; if (reg_val == NULL) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); cmd_resp->address = CPU_TO_LE32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) *reg_val = LE32_TO_CPU(cmd_resp->value); return status; } /** * i40e_read_rx_ctl - read from an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address **/ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) { enum i40e_status_code status = I40E_SUCCESS; bool use_register; int retry = 5; u32 val = 0; use_register = (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5)) || (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { i40e_msec_delay(1); retry--; goto do_retry; } } /* if the AQ access failed, try the old-fashioned way */ if (status || use_register) val = rd32(hw, reg_addr); return val; } /** * i40e_aq_rx_ctl_write_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Use the firmware to write to an Rx control register, * especially useful if the Rx unit is under heavy pressure **/ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_rx_ctl_reg_read_write *cmd = (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); cmd->address = CPU_TO_LE32(reg_addr); cmd->value = CPU_TO_LE32(reg_val); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_write_rx_ctl - write to an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value **/ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) { enum i40e_status_code status = I40E_SUCCESS; bool use_register; int retry = 5; use_register 
= (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5)) || (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40e_aq_rx_ctl_write_register(hw, reg_addr, reg_val, NULL); if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { i40e_msec_delay(1); retry--; goto do_retry; } } /* if the AQ access failed, try the old-fashioned way */ if (status || use_register) wr32(hw, reg_addr, reg_val); } /** * i40e_aq_set_phy_register * @hw: pointer to the hw struct * @phy_select: select which phy should be accessed * @dev_addr: PHY device address + * @page_change: enable auto page change * @reg_addr: PHY register address * @reg_val: new register value * @cmd_details: pointer to command details structure or NULL * * Write the external PHY register. **/ enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, + u8 phy_select, u8 dev_addr, bool page_change, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = (struct i40e_aqc_phy_register_access *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_register); cmd->phy_interface = phy_select; cmd->dev_addres = dev_addr; cmd->reg_address = CPU_TO_LE32(reg_addr); cmd->reg_value = CPU_TO_LE32(reg_val); + if (!page_change) + cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_phy_register * @hw: pointer to the hw struct * @phy_select: select which phy should be accessed * @dev_addr: PHY device address + * @page_change: enable auto page change * @reg_addr: PHY register address * @reg_val: read register value * @cmd_details: pointer to command details structure or NULL * * Read the external PHY register. **/ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, + u8 phy_select, u8 dev_addr, bool page_change, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = (struct i40e_aqc_phy_register_access *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_register); cmd->phy_interface = phy_select; cmd->dev_addres = dev_addr; cmd->reg_address = CPU_TO_LE32(reg_addr); + if (!page_change) + cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) *reg_val = LE32_TO_CPU(cmd->reg_value); return status; } +/** + * i40e_aq_run_phy_activity + * @hw: pointer to the hw struct + * @activity_id: ID of DNL activity to run + * @dnl_opcode: opcode passed to DNL script + * @cmd_status: pointer to memory to write return value of DNL script + * @data0: pointer to memory for first 4 bytes of data returned by DNL script + * @data1: pointer to memory for last 4 bytes of data returned by DNL script + * @cmd_details: pointer to command details structure or NULL + * + * Run DNL admin command. 
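+ * + * i40e_get_lpi_counters() above is the canonical caller; a direct + * invocation with the same activity/opcode pairing would look like: + * + *	u32 st, d0, d1; + * + *	i40e_aq_run_phy_activity(hw, + *	    I40E_AQ_RUN_PHY_ACTIVITY_ACTIVITY_ID_USER_DEFINED, + *	    I40E_AQ_RUN_PHY_ACTIVITY_DNL_OPCODE_GET_EEE_STATISTICS, + *	    &st, &d0, &d1, NULL);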
+ **/ +enum i40e_status_code +i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 dnl_opcode, + u32 *cmd_status, u32 *data0, u32 *data1, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_run_phy_activity *cmd; + enum i40e_status_code retval; + struct i40e_aq_desc desc; + + cmd = (struct i40e_aqc_run_phy_activity *)&desc.params.raw; + + if (!cmd_status || !data0 || !data1) { + retval = I40E_ERR_PARAM; + goto err; + } + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_run_phy_activity); + + cmd->activity_id = CPU_TO_LE16(activity_id); + cmd->params.cmd.dnl_opcode = CPU_TO_LE32(dnl_opcode); + + retval = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (retval) + goto err; + + *cmd_status = LE32_TO_CPU(cmd->params.resp.cmd_status); + *data0 = LE32_TO_CPU(cmd->params.resp.data0); + *data1 = LE32_TO_CPU(cmd->params.resp.data1); +err: + return retval; +} /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details * * Send message to PF driver using admin queue. By default, this message * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for * completion before returning. **/ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, enum virtchnl_ops v_opcode, enum i40e_status_code v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_asq_cmd_details details; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); desc.cookie_high = CPU_TO_LE32(v_opcode); desc.cookie_low = CPU_TO_LE32(v_retval); if (msglen) { desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); desc.datalen = CPU_TO_LE16(msglen); } if (!cmd_details) { i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM); details.async = TRUE; cmd_details = &details; } status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, msglen, cmd_details); return status; } /** * i40e_vf_parse_hw_config * @hw: pointer to the hardware structure * @msg: pointer to the virtual channel VF resource structure * * Given a VF resource message from the PF, populate the hw struct * with appropriate information. **/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, struct virtchnl_vf_resource *msg) { struct virtchnl_vsi_resource *vsi_res; int i; vsi_res = &msg->vsi_res[0]; hw->dev_caps.num_vsis = msg->num_vsis; hw->dev_caps.num_rx_qp = msg->num_queue_pairs; hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; hw->dev_caps.dcb = msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_L2; hw->dev_caps.iwarp = (msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; for (i = 0; i < msg->num_vsis; i++) { if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { i40e_memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr, ETH_ALEN, I40E_NONDMA_TO_NONDMA); i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr, ETH_ALEN, I40E_NONDMA_TO_NONDMA); } vsi_res++; } } /** * i40e_vf_reset * @hw: pointer to the hardware structure * * Send a VF_RESET message to the PF. Does not wait for response from PF * as none will be forthcoming. Immediately after calling this function, * the admin queue should be shut down and (optionally) reinitialized. 
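 * * A conforming caller sketch (the shutdown step mirrors the note above; it is not taken verbatim from this change): * *	i40e_vf_reset(hw); *	i40e_shutdown_adminq(hw);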
**/ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw) { return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, I40E_SUCCESS, NULL, 0, NULL); } /** * i40e_aq_set_arp_proxy_config * @hw: pointer to the HW structure * @proxy_config: pointer to proxy config command table struct * @cmd_details: pointer to command details * * Set ARP offload parameters from pre-populated * i40e_aqc_arp_proxy_data struct **/ enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, struct i40e_aqc_arp_proxy_data *proxy_config, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; if (!proxy_config) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); desc.params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config)); desc.params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config)); desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data)); status = i40e_asq_send_command(hw, &desc, proxy_config, sizeof(struct i40e_aqc_arp_proxy_data), cmd_details); return status; } /** * i40e_aq_set_ns_proxy_table_entry * @hw: pointer to the HW structure * @ns_proxy_table_entry: pointer to NS table entry command struct * @cmd_details: pointer to command details * * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters * from pre-populated i40e_aqc_ns_proxy_data struct **/ enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; if (!ns_proxy_table_entry) return I40E_ERR_PARAM; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_ns_proxy_table_entry); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); desc.params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry)); desc.params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry)); desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data)); status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry, sizeof(struct i40e_aqc_ns_proxy_data), cmd_details); return status; } /** * i40e_aq_set_clear_wol_filter * @hw: pointer to the hw struct * @filter_index: index of filter to modify (0-7) * @filter: buffer containing filter to be set * @set_filter: TRUE to set filter, FALSE to clear filter * @no_wol_tco: if TRUE, pass through packets cannot cause wake-up * if FALSE, pass through packets may cause wake-up * @filter_valid: TRUE if filter action is valid * @no_wol_tco_valid: TRUE if no WoL in TCO traffic action valid * @cmd_details: pointer to command details structure or NULL * * Set or clear WoL filter for port attached to the PF **/ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, u8 filter_index, struct i40e_aqc_set_wol_filter_data *filter, bool set_filter, bool no_wol_tco, bool filter_valid, bool no_wol_tco_valid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_wol_filter *cmd = (struct i40e_aqc_set_wol_filter *)&desc.params.raw; enum i40e_status_code status; u16 cmd_flags = 0; u16 valid_flags = 0; u16 buff_len = 0; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter); if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS) return I40E_ERR_PARAM; cmd->filter_index =
CPU_TO_LE16(filter_index); if (set_filter) { if (!filter) return I40E_ERR_PARAM; cmd_flags |= I40E_AQC_SET_WOL_FILTER; cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR; } if (no_wol_tco) cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL; cmd->cmd_flags = CPU_TO_LE16(cmd_flags); if (filter_valid) valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID; if (no_wol_tco_valid) valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID; cmd->valid_flags = CPU_TO_LE16(valid_flags); buff_len = sizeof(*filter); desc.datalen = CPU_TO_LE16(buff_len); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter)); cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter)); status = i40e_asq_send_command(hw, &desc, filter, buff_len, cmd_details); return status; } /** * i40e_aq_get_wake_event_reason * @hw: pointer to the hw struct * @wake_reason: return value, index of matching filter * @cmd_details: pointer to command details structure or NULL * * Get information for the reason of a Wake Up event **/ enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw, u16 *wake_reason, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_wake_reason_completion *resp = (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == I40E_SUCCESS) *wake_reason = LE16_TO_CPU(resp->wake_reason); return status; } /** * i40e_aq_clear_all_wol_filters * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Clear all WoL filters for the port attached to the PF **/ enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; enum i40e_status_code status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_clear_all_wol_filters); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } Index: releng/11.3/sys/dev/ixl/i40e_dcb.c =================================================================== --- releng/11.3/sys/dev/ixl/i40e_dcb.c (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_dcb.c (revision 349181) @@ -1,1385 +1,1423 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "i40e_adminq.h" #include "i40e_prototype.h" #include "i40e_dcb.h" /** * i40e_get_dcbx_status * @hw: pointer to the hw struct * @status: Embedded DCBX Engine Status * * Get the DCBX status from the Firmware **/ enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) { u32 reg; if (!status) return I40E_ERR_PARAM; reg = rd32(hw, I40E_PRTDCB_GENS); *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); return I40E_SUCCESS; } /** * i40e_parse_ieee_etscfg_tlv * @tlv: IEEE 802.1Qaz ETS CFG TLV * @dcbcfg: Local store to update ETS CFG data * * Parses IEEE 802.1Qaz ETS CFG TLV **/ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { struct i40e_dcb_ets_config *etscfg; u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; int i; /* First Octet post subtype * -------------------------- * |will-|CBS | Re- | Max | * |ing | |served| TCs | * -------------------------- * |1bit | 1bit|3 bits|3bits| */ etscfg = &dcbcfg->etscfg; etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> I40E_IEEE_ETS_WILLING_SHIFT); etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> I40E_IEEE_ETS_CBS_SHIFT); etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> I40E_IEEE_ETS_MAXTC_SHIFT); /* Move offset to Priority Assignment Table */ offset++; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> I40E_IEEE_ETS_PRIO_1_SHIFT); etscfg->prioritytable[i * 2] = priority; priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> I40E_IEEE_ETS_PRIO_0_SHIFT); etscfg->prioritytable[i * 2 + 1] = priority; offset++; } /* TC Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) etscfg->tcbwtable[i] = buf[offset++]; /* TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) etscfg->tsatable[i] = buf[offset++]; } /** * i40e_parse_ieee_etsrec_tlv * @tlv: IEEE 802.1Qaz ETS REC TLV * @dcbcfg: Local store to update ETS REC data * * Parses IEEE 802.1Qaz ETS REC TLV **/ static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; int i; /* Move offset to priority table */ offset++; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 
4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> I40E_IEEE_ETS_PRIO_1_SHIFT); dcbcfg->etsrec.prioritytable[i*2] = priority; priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> I40E_IEEE_ETS_PRIO_0_SHIFT); dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; offset++; } /* TC Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) dcbcfg->etsrec.tcbwtable[i] = buf[offset++]; /* TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) dcbcfg->etsrec.tsatable[i] = buf[offset++]; } /** * i40e_parse_ieee_pfccfg_tlv * @tlv: IEEE 802.1Qaz PFC CFG TLV * @dcbcfg: Local store to update PFC CFG data * * Parses IEEE 802.1Qaz PFC CFG TLV **/ static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 *buf = tlv->tlvinfo; /* ---------------------------------------- * |will-|MBC | Re- | PFC | PFC Enable | * |ing | |served| cap | | * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> I40E_IEEE_PFC_WILLING_SHIFT); dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> I40E_IEEE_PFC_MBC_SHIFT); dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> I40E_IEEE_PFC_CAP_SHIFT); dcbcfg->pfc.pfcenable = buf[1]; } /** * i40e_parse_ieee_app_tlv * @tlv: IEEE 802.1Qaz APP TLV * @dcbcfg: Local store to update APP PRIO data * * Parses IEEE 802.1Qaz APP PRIO TLV **/ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u16 typelength; u16 offset = 0; u16 length; int i = 0; u8 *buf; typelength = I40E_NTOHS(tlv->typelength); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); buf = tlv->tlvinfo; /* The App priority table starts 5 octets after TLV header */ length -= (sizeof(tlv->ouisubtype) + 1); /* Move offset to App Priority Table */ offset++; /* Application Priority Table (3 octets) * Octets:| 1 | 2 | 3 | * ----------------------------------------- * |Priority|Rsrvd| Sel | Protocol ID | * ----------------------------------------- * Bits:|23 21|20 19|18 16|15 0| * ----------------------------------------- */ while (offset < length) { dcbcfg->app[i].priority = (u8)((buf[offset] & I40E_IEEE_APP_PRIO_MASK) >> I40E_IEEE_APP_PRIO_SHIFT); dcbcfg->app[i].selector = (u8)((buf[offset] & I40E_IEEE_APP_SEL_MASK) >> I40E_IEEE_APP_SEL_SHIFT); dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | buf[offset + 2]; /* Move to next app */ offset += 3; i++; if (i >= I40E_DCBX_MAX_APPS) break; } dcbcfg->numapps = i; } /** * i40e_parse_ieee_tlv * @tlv: IEEE 802.1Qaz TLV * @dcbcfg: Local store to update DCBX config data * * Get the TLV subtype and send it to the parsing function * based on the subtype value **/ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u32 ouisubtype; u8 subtype; ouisubtype = I40E_NTOHL(tlv->ouisubtype); subtype = (u8)((ouisubtype &
I40E_LLDP_TLV_SUBTYPE_MASK) >> I40E_LLDP_TLV_SUBTYPE_SHIFT); switch (subtype) { case I40E_IEEE_SUBTYPE_ETS_CFG: i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); break; case I40E_IEEE_SUBTYPE_ETS_REC: i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg); break; case I40E_IEEE_SUBTYPE_PFC_CFG: i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg); break; case I40E_IEEE_SUBTYPE_APP_PRI: i40e_parse_ieee_app_tlv(tlv, dcbcfg); break; default: break; } } /** * i40e_parse_cee_pgcfg_tlv * @tlv: CEE DCBX PG CFG TLV * @dcbcfg: Local store to update ETS CFG data * * Parses CEE DCBX PG CFG TLV **/ static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { struct i40e_dcb_ets_config *etscfg; u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; int i; etscfg = &dcbcfg->etscfg; if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) etscfg->willing = 1; etscfg->cbs = 0; /* Priority Group Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >> I40E_CEE_PGID_PRIO_1_SHIFT); etscfg->prioritytable[i * 2] = priority; priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); etscfg->prioritytable[i * 2 + 1] = priority; offset++; } /* PG Percentage Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) etscfg->tcbwtable[i] = buf[offset++]; /* Number of TCs supported (1 octet) */ etscfg->maxtcs = buf[offset]; } /** * i40e_parse_cee_pfccfg_tlv * @tlv: CEE DCBX PFC CFG TLV * @dcbcfg: Local store to update PFC CFG data * * Parses CEE DCBX PFC CFG TLV **/ static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 *buf = tlv->tlvinfo; if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) dcbcfg->pfc.willing = 1; /* ------------------------ * | PFC Enable | PFC TCs | * ------------------------ * | 1 octet | 1 octet | */ dcbcfg->pfc.pfcenable = buf[0]; dcbcfg->pfc.pfccap = buf[1]; } /** * i40e_parse_cee_app_tlv * @tlv: CEE DCBX APP TLV * @dcbcfg: Local store to update APP PRIO data * * Parses CEE DCBX APP PRIO TLV **/ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u16 length, typelength, offset = 0; struct i40e_cee_app_prio *app; u8 i; typelength = I40E_NTOHS(tlv->hdr.typelen); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); dcbcfg->numapps = length / sizeof(*app); if (!dcbcfg->numapps) return; if (dcbcfg->numapps > I40E_DCBX_MAX_APPS) dcbcfg->numapps = I40E_DCBX_MAX_APPS; for (i = 0; i < dcbcfg->numapps; i++) { u8 up, selector; app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { if (app->prio_map & BIT(up)) break; } dcbcfg->app[i].priority = up; /* Get Selector from lower 2 bits, and convert to IEEE */ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); switch (selector) { case I40E_CEE_APP_SEL_ETHTYPE: dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; break; case I40E_CEE_APP_SEL_TCPIP: dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; break; default: /* Keep selector as it is for unknown types */ dcbcfg->app[i].selector = selector; } 
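/* The loop above selects the lowest set bit of the CEE prio_map bitmap
 * as the user priority. A minimal standalone sketch of that selection
 * (hypothetical helper name, illustration only):
 *
 *	static inline u8 example_lowest_up(u8 prio_map)
 *	{
 *		u8 up;
 *
 *		for (up = 0; up < I40E_MAX_USER_PRIORITY; up++)
 *			if (prio_map & BIT(up))
 *				break;
 *		return up;
 *	}
 *
 * e.g. prio_map 0x08 (only bit 3 set) yields user priority 3.
 */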
dcbcfg->app[i].protocolid = I40E_NTOHS(app->protocol); /* Move to next app */ offset += sizeof(*app); } } /** * i40e_parse_cee_tlv * @tlv: CEE DCBX TLV * @dcbcfg: Local store to update DCBX config data * * Get the TLV subtype and send it to parsing function * based on the subtype value **/ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u16 len, tlvlen, sublen, typelength; struct i40e_cee_feat_tlv *sub_tlv; u8 subtype, feat_tlv_count = 0; u32 ouisubtype; ouisubtype = I40E_NTOHL(tlv->ouisubtype); subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> I40E_LLDP_TLV_SUBTYPE_SHIFT); /* Return if not CEE DCBX */ if (subtype != I40E_CEE_DCBX_TYPE) return; typelength = I40E_NTOHS(tlv->typelength); tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); len = sizeof(tlv->typelength) + sizeof(ouisubtype) + sizeof(struct i40e_cee_ctrl_tlv); /* Return if no CEE DCBX Feature TLVs */ if (tlvlen <= len) return; sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len); while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) { typelength = I40E_NTOHS(sub_tlv->hdr.typelen); sublen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> I40E_LLDP_TLV_TYPE_SHIFT); switch (subtype) { case I40E_CEE_SUBTYPE_PG_CFG: i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); break; case I40E_CEE_SUBTYPE_PFC_CFG: i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); break; case I40E_CEE_SUBTYPE_APP_PRI: i40e_parse_cee_app_tlv(sub_tlv, dcbcfg); break; default: return; /* Invalid Sub-type return */ } feat_tlv_count++; /* Move to next sub TLV */ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) + sublen); } } /** * i40e_parse_org_tlv * @tlv: Organization specific TLV * @dcbcfg: Local store to update DCBX config data * * Currently IEEE 802.1Qaz and CEE DCBX TLVs are supported; * all others are ignored **/ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u32 ouisubtype; u32 oui; ouisubtype = I40E_NTOHL(tlv->ouisubtype); oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> I40E_LLDP_TLV_OUI_SHIFT); switch (oui) { case I40E_IEEE_8021QAZ_OUI: i40e_parse_ieee_tlv(tlv, dcbcfg); break; case I40E_CEE_DCBX_OUI: i40e_parse_cee_tlv(tlv, dcbcfg); break; default: break; } } /** * i40e_lldp_to_dcb_config * @lldpmib: LLDPDU to be parsed * @dcbcfg: store for the parsed DCB configuration * * Parse DCB configuration from the LLDPDU **/ enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib, struct i40e_dcbx_config *dcbcfg) { enum i40e_status_code ret = I40E_SUCCESS; struct i40e_lldp_org_tlv *tlv; u16 type; u16 length; u16 typelength; u16 offset = 0; if (!lldpmib || !dcbcfg) return I40E_ERR_PARAM; /* set to the start of LLDPDU */ lldpmib += I40E_LLDP_MIB_HLEN; tlv = (struct i40e_lldp_org_tlv *)lldpmib; while (1) { typelength = I40E_NTOHS(tlv->typelength); type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> I40E_LLDP_TLV_TYPE_SHIFT); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); offset += sizeof(typelength) + length; /* END TLV or beyond LLDPDU size */ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) break; switch (type) { case I40E_TLV_TYPE_ORG: i40e_parse_org_tlv(tlv, dcbcfg); break; default: break; } /* Move to next TLV */ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + sizeof(tlv->typelength) + length); } return ret; } /** * i40e_aq_get_dcb_config * @hw: pointer to the hw struct * @mib_type: mib 
type for the query * @bridgetype: bridge type for the query (remote) * @dcbcfg: store for LLDPDU data * * Query DCB configuration from the Firmware **/ enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, u8 bridgetype, struct i40e_dcbx_config *dcbcfg) { enum i40e_status_code ret = I40E_SUCCESS; struct i40e_virt_mem mem; u8 *lldpmib; /* Allocate the LLDPDU */ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); if (ret) return ret; lldpmib = (u8 *)mem.va; ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, I40E_LLDPDU_SIZE, NULL, NULL, NULL); if (ret) goto free_mem; /* Parse LLDP MIB to get dcb configuration */ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg); free_mem: i40e_free_virt_mem(hw, &mem); return ret; } /** * i40e_cee_to_dcb_v1_config * @cee_cfg: pointer to CEE v1 response configuration struct * @dcbcfg: DCB configuration struct * * Convert CEE v1 configuration from firmware to DCB configuration **/ static void i40e_cee_to_dcb_v1_config( struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, struct i40e_dcbx_config *dcbcfg) { u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status); u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); u8 i, tc, err; /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; /* Note that the FW creates the oper_prio_tc nibbles reversed * from those in the CEE Priority Group sub-TLV. */ for (i = 0; i < 4; i++) { tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); dcbcfg->etscfg.prioritytable[i*2] = tc; tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_1_MASK) >> I40E_CEE_PGID_PRIO_1_SHIFT); dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { /* Map it to next empty TC */ dcbcfg->etscfg.prioritytable[i] = cee_cfg->oper_num_tc - 1; dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; } else { dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; } } /* CEE PFC data to ETS config */ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> I40E_AQC_CEE_APP_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 
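/* Each feature's status is extracted from tlv_status with a mask/shift
 * pair and then tested against the I40E_TLV_STATUS_* flags. A minimal
 * sketch of the pattern used here (the FIELD_* names are placeholders):
 *
 *	status = (tlv_status & FIELD_STATUS_MASK) >> FIELD_STATUS_SHIFT;
 *	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
 *
 * When the firmware flags an error for the APP field, the operational
 * APP entries are skipped entirely.
 */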
1 : 0; /* Add APPs if Error is False */ if (!err) { /* CEE operating configuration supports FCoE/iSCSI/FIP only */ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; /* FCoE APP */ dcbcfg->app[0].priority = (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> I40E_AQC_CEE_APP_FCOE_SHIFT; dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; /* iSCSI APP */ dcbcfg->app[1].priority = (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> I40E_AQC_CEE_APP_ISCSI_SHIFT; dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; /* FIP APP */ dcbcfg->app[2].priority = (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> I40E_AQC_CEE_APP_FIP_SHIFT; dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; } } /** * i40e_cee_to_dcb_config * @cee_cfg: pointer to CEE configuration struct * @dcbcfg: DCB configuration struct * * Convert CEE configuration from firmware to DCB configuration **/ static void i40e_cee_to_dcb_config( struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, struct i40e_dcbx_config *dcbcfg) { u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status); u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); u8 i, tc, err, sync, oper; /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; /* Note that the FW creates the oper_prio_tc nibbles reversed * from those in the CEE Priority Group sub-TLV. */ for (i = 0; i < 4; i++) { tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); dcbcfg->etscfg.prioritytable[i*2] = tc; tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_1_MASK) >> I40E_CEE_PGID_PRIO_1_SHIFT); dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { /* Map it to next empty TC */ dcbcfg->etscfg.prioritytable[i] = cee_cfg->oper_num_tc - 1; dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; } else { dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; } } /* CEE PFC data to ETS config */ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; i = 0; status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >> I40E_AQC_CEE_FCOE_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; /* Add FCoE APP if Error is False and Oper/Sync is True */ if (!err && sync && oper) { /* FCoE APP */ dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> I40E_AQC_CEE_APP_FCOE_SHIFT; dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE; i++; } status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >> I40E_AQC_CEE_ISCSI_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; /* Add iSCSI APP if Error is False and Oper/Sync is True */ if (!err && sync && oper) { /* iSCSI APP */ dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> I40E_AQC_CEE_APP_ISCSI_SHIFT; dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI; i++; } status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >> I40E_AQC_CEE_FIP_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 
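/* A CEE feature's APP entry is added only when the firmware reports it
 * error-free, synchronized and operational. A minimal sketch of that
 * gate as a predicate (hypothetical helper name, illustration only):
 *
 *	static inline bool example_cee_app_usable(u32 status)
 *	{
 *		return !(status & I40E_TLV_STATUS_ERR) &&
 *		       (status & I40E_TLV_STATUS_SYNC) &&
 *		       (status & I40E_TLV_STATUS_OPER);
 *	}
 */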
1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; /* Add FIP APP if Error is False and Oper/Sync is True */ if (!err && sync && oper) { /* FIP APP */ dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> I40E_AQC_CEE_APP_FIP_SHIFT; dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP; i++; } dcbcfg->numapps = i; } /** * i40e_get_ieee_dcb_config * @hw: pointer to the hw struct * * Get IEEE mode DCB configuration from the Firmware **/ static enum i40e_status_code i40e_get_ieee_dcb_config(struct i40e_hw *hw) { enum i40e_status_code ret = I40E_SUCCESS; /* IEEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; /* Get Local DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->local_dcbx_config); if (ret) goto out; /* Get Remote DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); /* Don't treat ENOENT as an error for Remote MIBs */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) ret = I40E_SUCCESS; out: return ret; } /** * i40e_get_dcb_config * @hw: pointer to the hw struct * * Get DCB configuration from the Firmware **/ enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw) { enum i40e_status_code ret = I40E_SUCCESS; struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; /* If Firmware version < v4.33 on X710/XL710, IEEE only */ if ((hw->mac.type == I40E_MAC_XL710) && (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4))) return i40e_get_ieee_dcb_config(hw); /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */ if ((hw->mac.type == I40E_MAC_XL710) && ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, sizeof(cee_v1_cfg), NULL); if (ret == I40E_SUCCESS) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; hw->local_dcbx_config.tlv_status = LE16_TO_CPU(cee_v1_cfg.tlv_status); i40e_cee_to_dcb_v1_config(&cee_v1_cfg, &hw->local_dcbx_config); } } else { ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, sizeof(cee_cfg), NULL); if (ret == I40E_SUCCESS) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; hw->local_dcbx_config.tlv_status = LE32_TO_CPU(cee_cfg.tlv_status); i40e_cee_to_dcb_config(&cee_cfg, &hw->local_dcbx_config); } } /* CEE mode not enabled try querying IEEE data */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) return i40e_get_ieee_dcb_config(hw); if (ret != I40E_SUCCESS) goto out; /* Get CEE DCB Desired Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->desired_dcbx_config); if (ret) goto out; /* Get Remote DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); /* Don't treat ENOENT as an error for Remote MIBs */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) ret = I40E_SUCCESS; out: return ret; } /** * i40e_init_dcb * @hw: pointer to the hw struct + * @enable_mib_change: enable mib change event * * Update DCB configuration from the Firmware **/ -enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw) +enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) { enum i40e_status_code ret = I40E_SUCCESS; struct i40e_lldp_variables lldp_cfg; u8 adminstatus = 0; if (!hw->func_caps.dcb) - return ret; + return I40E_NOT_SUPPORTED; /* Read LLDP NVM area */ ret = i40e_read_lldp_cfg(hw, 
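/* The adminstatus word read here packs one 4-bit LLDP AdminStatus value
 * per port; the decode performed just below is:
 *
 *	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
 *	adminstatus &= 0xF;
 *
 * e.g. with an illustrative NVM value of 0x3330, port 0 decodes to 0
 * (I40E_LLDP_ADMINSTATUS_DISABLED) and port 2 decodes to 3
 * (I40E_LLDP_ADMINSTATUS_ENABLED_RXTX).
 */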
&lldp_cfg); if (ret) - return ret; + return I40E_ERR_NOT_READY; /* Get the LLDP AdminStatus for the current port */ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); adminstatus &= 0xF; /* LLDP agent disabled */ if (!adminstatus) { hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; - return ret; + return I40E_ERR_NOT_READY; } /* Get DCBX status */ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); if (ret) return ret; /* Check the DCBX Status */ - switch (hw->dcbx_status) { - case I40E_DCBX_STATUS_DONE: - case I40E_DCBX_STATUS_IN_PROGRESS: + if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || + hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { /* Get current DCBX configuration */ ret = i40e_get_dcb_config(hw); if (ret) return ret; - break; - case I40E_DCBX_STATUS_DISABLED: - return ret; - case I40E_DCBX_STATUS_NOT_STARTED: - case I40E_DCBX_STATUS_MULTIPLE_PEERS: - default: - break; + } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { + return I40E_ERR_NOT_READY; } /* Configure the LLDP MIB change event */ - ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL); + if (enable_mib_change) + ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL); + + return ret; +} + +/** + * i40e_get_fw_lldp_status + * @hw: pointer to the hw struct + * @lldp_status: pointer to the status enum + * + * Get status of FW Link Layer Discovery Protocol (LLDP) Agent. + * Status of agent is reported via @lldp_status parameter. + **/ +enum i40e_status_code +i40e_get_fw_lldp_status(struct i40e_hw *hw, + enum i40e_get_fw_lldp_status_resp *lldp_status) +{ + enum i40e_status_code ret; + struct i40e_virt_mem mem; + u8 *lldpmib; + + if (!lldp_status) + return I40E_ERR_PARAM; + + /* Allocate buffer for the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); if (ret) return ret; + lldpmib = (u8 *)mem.va; + ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib, + I40E_LLDPDU_SIZE, NULL, NULL, NULL); + + if (ret == I40E_SUCCESS) { + *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED; + } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) { + /* MIB is not available yet but the agent is running */ + *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED; + ret = I40E_SUCCESS; + } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED; + ret = I40E_SUCCESS; + } + + i40e_free_virt_mem(hw, &mem); return ret; } + /** * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format * @tlv: Fill the ETS config data in IEEE format * @dcbcfg: Local store which holds the DCB Config * * Prepare IEEE 802.1Qaz ETS CFG TLV **/ static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 priority0, priority1, maxtcwilling = 0; struct i40e_dcb_ets_config *etscfg; u16 offset = 0, typelength, i; u8 *buf = tlv->tlvinfo; u32 ouisubtype; typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | I40E_IEEE_ETS_TLV_LENGTH); tlv->typelength = I40E_HTONS(typelength); ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | I40E_IEEE_SUBTYPE_ETS_CFG); tlv->ouisubtype = I40E_HTONL(ouisubtype); /* First Octet post subtype * -------------------------- * |will-|CBS | Re- | Max | * |ing | |served| TCs | * -------------------------- * |1bit | 1bit|3 bits|3bits| */ etscfg = &dcbcfg->etscfg; if (etscfg->willing) maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT); maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK; buf[offset] = maxtcwilling; /* Move offset to Priority Assignment Table */ offset++; /* Priority Assignment Table (4 octets) 
* Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority0 = etscfg->prioritytable[i * 2] & 0xF; priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF; buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | priority1; offset++; } /* TC Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) buf[offset++] = etscfg->tcbwtable[i]; /* TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) buf[offset++] = etscfg->tsatable[i]; } /** * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format * @tlv: Fill ETS Recommended TLV in IEEE format * @dcbcfg: Local store which holds the DCB Config * * Prepare IEEE 802.1Qaz ETS REC TLV **/ static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { struct i40e_dcb_ets_config *etsrec; u16 offset = 0, typelength, i; u8 priority0, priority1; u8 *buf = tlv->tlvinfo; u32 ouisubtype; typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | I40E_IEEE_ETS_TLV_LENGTH); tlv->typelength = I40E_HTONS(typelength); ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | I40E_IEEE_SUBTYPE_ETS_REC); tlv->ouisubtype = I40E_HTONL(ouisubtype); etsrec = &dcbcfg->etsrec; /* First Octet is reserved */ /* Move offset to Priority Assignment Table */ offset++; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority0 = etsrec->prioritytable[i * 2] & 0xF; priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF; buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | priority1; offset++; } /* TC Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) buf[offset++] = etsrec->tcbwtable[i]; /* TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) buf[offset++] = etsrec->tsatable[i]; } /** * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format * @tlv: Fill PFC TLV in IEEE format * @dcbcfg: Local store to get PFC CFG data * * Prepare IEEE 802.1Qaz PFC CFG TLV **/ static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 *buf = tlv->tlvinfo; u32 ouisubtype; u16 typelength; typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | I40E_IEEE_PFC_TLV_LENGTH); tlv->typelength = I40E_HTONS(typelength); ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | I40E_IEEE_SUBTYPE_PFC_CFG); tlv->ouisubtype = I40E_HTONL(ouisubtype); /* ---------------------------------------- * |will-|MBC | 
Re- | PFC | PFC Enable | * |ing | |served| cap | | * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ if (dcbcfg->pfc.willing) buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT); if (dcbcfg->pfc.mbc) buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT); buf[0] |= dcbcfg->pfc.pfccap & 0xF; buf[1] = dcbcfg->pfc.pfcenable; } /** * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format * @tlv: Fill APP TLV in IEEE format * @dcbcfg: Local store to get APP CFG data * * Prepare IEEE 802.1Qaz APP CFG TLV **/ static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u16 typelength, length, offset = 0; u8 priority, selector, i = 0; u8 *buf = tlv->tlvinfo; u32 ouisubtype; /* No APP TLVs then just return */ if (dcbcfg->numapps == 0) return; ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | I40E_IEEE_SUBTYPE_APP_PRI); tlv->ouisubtype = I40E_HTONL(ouisubtype); /* Move offset to App Priority Table */ offset++; /* Application Priority Table (3 octets) * Octets:| 1 | 2 | 3 | * ----------------------------------------- * |Priority|Rsrvd| Sel | Protocol ID | * ----------------------------------------- * Bits:|23 21|20 19|18 16|15 0| * ----------------------------------------- */ while (i < dcbcfg->numapps) { priority = dcbcfg->app[i].priority & 0x7; selector = dcbcfg->app[i].selector & 0x7; buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector; buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF; buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF; /* Move to next app */ offset += 3; i++; if (i >= I40E_DCBX_MAX_APPS) break; } /* length includes size of ouisubtype + 1 reserved + 3*numapps */ length = sizeof(tlv->ouisubtype) + 1 + (i*3); typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | (length & 0x1FF)); tlv->typelength = I40E_HTONS(typelength); } /** * i40e_add_dcb_tlv - Add all IEEE TLVs * @tlv: pointer to org tlv * @dcbcfg: pointer to the DCB configuration store * @tlvid: ID of the IEEE TLV to add * * Add the requested IEEE TLV information **/ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg, u16 tlvid) { switch (tlvid) { case I40E_IEEE_TLV_ID_ETS_CFG: i40e_add_ieee_ets_tlv(tlv, dcbcfg); break; case I40E_IEEE_TLV_ID_ETS_REC: i40e_add_ieee_etsrec_tlv(tlv, dcbcfg); break; case I40E_IEEE_TLV_ID_PFC_CFG: i40e_add_ieee_pfc_tlv(tlv, dcbcfg); break; case I40E_IEEE_TLV_ID_APP_PRI: i40e_add_ieee_app_pri_tlv(tlv, dcbcfg); break; default: break; } } /** * i40e_set_dcb_config - Set the local LLDP MIB to FW * @hw: pointer to the hw struct * * Set DCB configuration to the Firmware **/ enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw) { enum i40e_status_code ret = I40E_SUCCESS; struct i40e_dcbx_config *dcbcfg; struct i40e_virt_mem mem; u8 mib_type, *lldpmib; u16 miblen; /* update the hw local config */ dcbcfg = &hw->local_dcbx_config; /* Allocate the LLDPDU */ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); if (ret) return ret; mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB; if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) { mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS << SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT; } lldpmib = (u8 *)mem.va; ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg); ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL); i40e_free_virt_mem(hw, &mem); return ret; } /** * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format * @lldpmib: pointer to the buffer that receives the LLDPDU * @miblen: pointer to return the length of the built LLDPDU * @dcbcfg: DCB configuration to convert * * Convert the DCB configuration to LLDP MIB format **/ enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 
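/* The 16-bit LLDP TLV header packs the type in bits 15:9 and the length
 * in bits 8:0, as built by the i40e_add_ieee_*_tlv helpers above. A
 * minimal worked example for the IEEE ETS CFG TLV:
 *
 *	typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
 *			   I40E_IEEE_ETS_TLV_LENGTH);
 *
 * which is (127 << 9) | 25 == 0xFE19, stored big-endian on the wire by
 * I40E_HTONS().
 */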
*miblen, struct i40e_dcbx_config *dcbcfg) { u16 length, offset = 0, tlvid = I40E_TLV_ID_START; enum i40e_status_code ret = I40E_SUCCESS; struct i40e_lldp_org_tlv *tlv; u16 typelength; tlv = (struct i40e_lldp_org_tlv *)lldpmib; while (1) { i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++); typelength = I40E_NTOHS(tlv->typelength); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); if (length) offset += length + 2; /* END TLV or beyond LLDPDU size */ if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) || (offset > I40E_LLDPDU_SIZE)) break; /* Move to next TLV */ if (length) tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + sizeof(tlv->typelength) + length); } *miblen = offset; return ret; } /** * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM * @hw: pointer to the HW structure * @lldp_cfg: pointer to hold lldp configuration variables * @module: address of the module pointer * @word_offset: offset of LLDP configuration * * Reads the LLDP configuration data from NVM using passed addresses **/ static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg, u8 module, u32 word_offset) { u32 address, offset = (2 * word_offset); enum i40e_status_code ret; __le16 raw_mem; u16 mem; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret != I40E_SUCCESS) return ret; ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem, TRUE, NULL); i40e_release_nvm(hw); if (ret != I40E_SUCCESS) return ret; mem = LE16_TO_CPU(raw_mem); /* Check if this pointer needs to be read in word size or 4K sector * units. */ if (mem & I40E_PTR_TYPE) address = (0x7FFF & mem) * 4096; else address = (0x7FFF & mem) * 2; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret != I40E_SUCCESS) goto err_lldp_cfg; ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem, TRUE, NULL); i40e_release_nvm(hw); if (ret != I40E_SUCCESS) return ret; mem = LE16_TO_CPU(raw_mem); offset = mem + word_offset; offset *= 2; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret != I40E_SUCCESS) goto err_lldp_cfg; ret = i40e_aq_read_nvm(hw, 0, address + offset, sizeof(struct i40e_lldp_variables), lldp_cfg, TRUE, NULL); i40e_release_nvm(hw); err_lldp_cfg: return ret; } /** * i40e_read_lldp_cfg - read LLDP Configuration data from NVM * @hw: pointer to the HW structure * @lldp_cfg: pointer to hold lldp configuration variables * * Reads the LLDP configuration data from NVM **/ enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg) { enum i40e_status_code ret = I40E_SUCCESS; u32 mem; if (!lldp_cfg) return I40E_ERR_PARAM; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret != I40E_SUCCESS) return ret; ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem), &mem, TRUE, NULL); i40e_release_nvm(hw); if (ret != I40E_SUCCESS) return ret; /* Read a bit that holds information whether we are running flat or * structured NVM image. Flat image has LLDP configuration in shadow * ram, so there is a need to pass different addresses for both cases. 
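 * Note: module pointers read from the NVM encode their addressing unit
 * in the top bit (I40E_PTR_TYPE), which _i40e_read_lldp_cfg above
 * decodes as follows (a minimal worked sketch, values illustrative):
 *
 *	if (mem & I40E_PTR_TYPE)
 *		address = (0x7FFF & mem) * 4096;	in 4-KB sector units
 *	else
 *		address = (0x7FFF & mem) * 2;		in word units
 *
 * e.g. a pointer word of 0x8010 resolves to byte address 0x10000,
 * while 0x0010 resolves to byte address 0x20.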
*/ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) { /* Flat NVM case */ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR, I40E_SR_LLDP_CFG_PTR); } else { /* Good old structured NVM image */ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR, I40E_NVM_LLDP_CFG_PTR); } return ret; } Index: releng/11.3/sys/dev/ixl/i40e_dcb.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_dcb.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_dcb.h (revision 349181) @@ -1,224 +1,234 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_DCB_H_ #define _I40E_DCB_H_ #include "i40e_type.h" #define I40E_DCBX_OFFLOAD_DISABLED 0 #define I40E_DCBX_OFFLOAD_ENABLED 1 #define I40E_DCBX_STATUS_NOT_STARTED 0 #define I40E_DCBX_STATUS_IN_PROGRESS 1 #define I40E_DCBX_STATUS_DONE 2 #define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 #define I40E_DCBX_STATUS_DISABLED 7 #define I40E_TLV_TYPE_END 0 #define I40E_TLV_TYPE_ORG 127 #define I40E_IEEE_8021QAZ_OUI 0x0080C2 #define I40E_IEEE_SUBTYPE_ETS_CFG 9 #define I40E_IEEE_SUBTYPE_ETS_REC 10 #define I40E_IEEE_SUBTYPE_PFC_CFG 11 #define I40E_IEEE_SUBTYPE_APP_PRI 12 #define I40E_CEE_DCBX_OUI 0x001b21 #define I40E_CEE_DCBX_TYPE 2 #define I40E_CEE_SUBTYPE_CTRL 1 #define I40E_CEE_SUBTYPE_PG_CFG 2 #define I40E_CEE_SUBTYPE_PFC_CFG 3 #define I40E_CEE_SUBTYPE_APP_PRI 4 #define I40E_CEE_MAX_FEAT_TYPE 3 #define I40E_LLDP_ADMINSTATUS_DISABLED 0 #define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1 #define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2 #define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3 /* Defines for LLDP TLV header */ #define I40E_LLDP_MIB_HLEN 14 #define I40E_LLDP_TLV_LEN_SHIFT 0 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) #define I40E_LLDP_TLV_TYPE_SHIFT 9 #define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT) #define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 #define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) #define I40E_LLDP_TLV_OUI_SHIFT 8 #define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) /* Defines for IEEE ETS TLV */ #define I40E_IEEE_ETS_MAXTC_SHIFT 0 #define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) #define I40E_IEEE_ETS_CBS_SHIFT 6 #define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT) #define I40E_IEEE_ETS_WILLING_SHIFT 7 #define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT) #define I40E_IEEE_ETS_PRIO_0_SHIFT 0 #define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) #define I40E_IEEE_ETS_PRIO_1_SHIFT 4 #define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) #define I40E_CEE_PGID_PRIO_0_SHIFT 0 #define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT) #define I40E_CEE_PGID_PRIO_1_SHIFT 4 #define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT) #define I40E_CEE_PGID_STRICT 15 /* Defines for IEEE TSA types */ #define I40E_IEEE_TSA_STRICT 0 #define I40E_IEEE_TSA_CBS 1 #define I40E_IEEE_TSA_ETS 2 #define I40E_IEEE_TSA_VENDOR 255 /* Defines for IEEE PFC TLV */ #define I40E_IEEE_PFC_CAP_SHIFT 0 #define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) #define I40E_IEEE_PFC_MBC_SHIFT 6 #define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT) #define I40E_IEEE_PFC_WILLING_SHIFT 7 #define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT) /* Defines for IEEE APP TLV */ #define I40E_IEEE_APP_SEL_SHIFT 0 #define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT) #define I40E_IEEE_APP_PRIO_SHIFT 5 #define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT) /* TLV definitions for preparing MIB */ #define I40E_TLV_ID_CHASSIS_ID 0 #define I40E_TLV_ID_PORT_ID 1 #define I40E_TLV_ID_TIME_TO_LIVE 2 #define I40E_IEEE_TLV_ID_ETS_CFG 3 #define I40E_IEEE_TLV_ID_ETS_REC 4 #define I40E_IEEE_TLV_ID_PFC_CFG 5 #define I40E_IEEE_TLV_ID_APP_PRI 6 #define I40E_TLV_ID_END_OF_LLDPPDU 7 #define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG #define I40E_IEEE_ETS_TLV_LENGTH 25 #define I40E_IEEE_PFC_TLV_LENGTH 6 #define I40E_IEEE_APP_TLV_LENGTH 11 #pragma pack(1) /* IEEE 
802.1AB LLDP TLV structure */ struct i40e_lldp_generic_tlv { __be16 typelength; u8 tlvinfo[1]; }; /* IEEE 802.1AB LLDP Organization specific TLV */ struct i40e_lldp_org_tlv { __be16 typelength; __be32 ouisubtype; u8 tlvinfo[1]; }; struct i40e_cee_tlv_hdr { __be16 typelen; u8 operver; u8 maxver; }; struct i40e_cee_ctrl_tlv { struct i40e_cee_tlv_hdr hdr; __be32 seqno; __be32 ackno; }; struct i40e_cee_feat_tlv { struct i40e_cee_tlv_hdr hdr; u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ #define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80 #define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40 #define I40E_CEE_FEAT_TLV_ERR_MASK 0x20 u8 subtype; u8 tlvinfo[1]; }; struct i40e_cee_app_prio { __be16 protocol; u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */ #define I40E_CEE_APP_SELECTOR_MASK 0x03 __be16 lower_oui; u8 prio_map; }; #pragma pack() /* * TODO: The below structures related LLDP/DCBX variables * and statistics are defined but need to find how to get * the required information from the Firmware to use them */ /* IEEE 802.1AB LLDP Agent Statistics */ struct i40e_lldp_stats { u64 remtablelastchangetime; u64 remtableinserts; u64 remtabledeletes; u64 remtabledrops; u64 remtableageouts; u64 txframestotal; u64 rxframesdiscarded; u64 rxportframeerrors; u64 rxportframestotal; u64 rxporttlvsdiscardedtotal; u64 rxporttlvsunrecognizedtotal; u64 remtoomanyneighbors; }; /* IEEE 802.1Qaz DCBX variables */ struct i40e_dcbx_variables { u32 defmaxtrafficclasses; u32 defprioritytcmapping; u32 deftcbandwidth; u32 deftsaassignment; }; + +enum i40e_get_fw_lldp_status_resp { + I40E_GET_FW_LLDP_STATUS_DISABLED = 0, + I40E_GET_FW_LLDP_STATUS_ENABLED = 1 +}; + enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status); enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib, struct i40e_dcbx_config *dcbcfg); enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, u8 bridgetype, struct i40e_dcbx_config *dcbcfg); enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw); -enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw); +enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw, + bool enable_mib_change); +enum i40e_status_code +i40e_get_fw_lldp_status(struct i40e_hw *hw, + enum i40e_get_fw_lldp_status_resp *lldp_status); enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw); enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, struct i40e_dcbx_config *dcbcfg); #endif /* _I40E_DCB_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_devids.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_devids.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_devids.h (revision 349181) @@ -1,73 +1,78 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_DEVIDS_H_ #define _I40E_DEVIDS_H_ /* Vendor ID */ #define I40E_INTEL_VENDOR_ID 0x8086 /* Device IDs */ +#define I40E_DEV_ID_X710_N3000 0x0CF8 +#define I40E_DEV_ID_XXV710_N3000 0x0D58 #define I40E_DEV_ID_SFP_XL710 0x1572 #define I40E_DEV_ID_QEMU 0x1574 #define I40E_DEV_ID_KX_B 0x1580 #define I40E_DEV_ID_KX_C 0x1581 #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 #define I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_20G_KR2_A 0x1588 #define I40E_DEV_ID_10G_BASE_T4 0x1589 #define I40E_DEV_ID_25G_B 0x158A #define I40E_DEV_ID_25G_SFP28 0x158B +#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF +#define I40E_DEV_ID_10G_B 0x104F +#define I40E_DEV_ID_10G_SFP 0x104E #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 #define I40E_DEV_ID_ADAPTIVE_VF 0x1889 #define I40E_DEV_ID_KX_X722 0x37CE #define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 #define I40E_DEV_ID_X722_VF 0x37CD #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ (d) == I40E_DEV_ID_QSFP_C) #define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \ (d) == I40E_DEV_ID_25G_SFP28) #endif /* _I40E_DEVIDS_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_hmc.c =================================================================== --- releng/11.3/sys/dev/ixl/i40e_hmc.c (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_hmc.c (revision 349181) @@ -1,370 +1,370 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_status.h" #include "i40e_alloc.h" #include "i40e_hmc.h" #include "i40e_type.h" /** * i40e_add_sd_table_entry - Adds a segment descriptor to the table * @hw: pointer to our hw struct * @hmc_info: pointer to the HMC configuration information struct * @sd_index: segment descriptor index to manipulate * @type: what type of segment descriptor we're manipulating * @direct_mode_sz: size to alloc in direct mode **/ enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 sd_index, enum i40e_sd_entry_type type, u64 direct_mode_sz) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_hmc_sd_entry *sd_entry; enum i40e_memory_type mem_type; bool dma_mem_alloc_done = FALSE; struct i40e_dma_mem mem; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n"); goto exit; } if (sd_index >= hmc_info->sd_table.sd_cnt) { ret_code = I40E_ERR_INVALID_SD_INDEX; DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n"); goto exit; } sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; if (!sd_entry->valid) { if (I40E_SD_TYPE_PAGED == type) { mem_type = i40e_mem_pd; alloc_len = I40E_HMC_PAGED_BP_SIZE; } else { mem_type = i40e_mem_bp_jumbo; alloc_len = direct_mode_sz; } /* allocate a 4K pd page or 2M backing page */ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, I40E_HMC_PD_BP_BUF_ALIGNMENT); if (ret_code) goto exit; dma_mem_alloc_done = TRUE; if (I40E_SD_TYPE_PAGED == type) { ret_code = i40e_allocate_virt_mem(hw, &sd_entry->u.pd_table.pd_entry_virt_mem, sizeof(struct i40e_hmc_pd_entry) * 512); if (ret_code) goto exit; sd_entry->u.pd_table.pd_entry = (struct i40e_hmc_pd_entry *) sd_entry->u.pd_table.pd_entry_virt_mem.va; i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA); } else { i40e_memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA); sd_entry->u.bp.sd_pd_index = sd_index; } /* initialize the sd entry */ hmc_info->sd_table.sd_entry[sd_index].entry_type = type; /* increment the ref count */ I40E_INC_SD_REFCNT(&hmc_info->sd_table); } /* Increment backing page reference count */ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type) I40E_INC_BP_REFCNT(&sd_entry->u.bp); exit: if (I40E_SUCCESS != ret_code) if (dma_mem_alloc_done) i40e_free_dma_mem(hw, &mem); return ret_code; } /** * i40e_add_pd_table_entry - Adds page descriptor to the specified table * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @pd_index: which page descriptor index to 
manipulate * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. * * This function: * 1. Initializes the pd entry * 2. Adds pd_entry in the pd_table * 3. Marks the entry valid in the i40e_hmc_pd_entry structure * 4. Initializes the pd_entry's ref count to 1 * assumptions: * 1. The memory for pd should be pinned down, physically contiguous, * aligned on a 4K boundary, and zeroed. * 2. It should be 4K in size. **/ enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 pd_index, struct i40e_dma_mem *rsrc_pg) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_pd_entry *pd_entry; struct i40e_dma_mem mem; struct i40e_dma_mem *page = &mem; u32 sd_idx, rel_pd_idx; u64 *pd_addr; u64 page_desc; if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n"); goto exit; } /* find corresponding sd */ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD); if (I40E_SD_TYPE_PAGED != hmc_info->sd_table.sd_entry[sd_idx].entry_type) goto exit; rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD); pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; if (!pd_entry->valid) { if (rsrc_pg) { pd_entry->rsrc_pg = TRUE; page = rsrc_pg; } else { /* allocate a 4K backing page */ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, I40E_HMC_PAGED_BP_SIZE, I40E_HMC_PD_BP_BUF_ALIGNMENT); if (ret_code) goto exit; pd_entry->rsrc_pg = FALSE; } i40e_memcpy(&pd_entry->bp.addr, page, sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA); pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; /* Set page address and valid bit */ page_desc = page->pa | 0x1; pd_addr = (u64 *)pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; /* Add the backing page physical address in the pd entry */ i40e_memcpy(pd_addr, &page_desc, sizeof(u64), I40E_NONDMA_TO_DMA); pd_entry->sd_index = sd_idx; pd_entry->valid = TRUE; I40E_INC_PD_REFCNT(pd_table); } I40E_INC_BP_REFCNT(&pd_entry->bp); exit: return ret_code; } /** * i40e_remove_pd_bp - remove a backing page from a page descriptor * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index * * This function: * 1. Marks the entry in pd table (for paged address mode) or in sd table * (for direct address mode) invalid. * 2. Writes to register PMPDINV to invalidate the backing page in FV cache * 3. Decrements the ref count for the pd_entry * assumptions: * 1. Caller can deallocate the memory used by backing storage after this * function returns. 
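 * A minimal worked sketch of the index arithmetic used below (with
 * I40E_HMC_PD_CNT_IN_SD == 512 from i40e_hmc.h):
 *
 *	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
 *	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
 *
 * e.g. page index 1000 lives in segment descriptor 1 at relative page
 * descriptor index 488.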
**/ enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_hmc_pd_entry *pd_entry; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_sd_entry *sd_entry; u32 sd_idx, rel_pd_idx; u64 *pd_addr; /* calculate index */ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD; rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD; if (sd_idx >= hmc_info->sd_table.sd_cnt) { ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; DEBUGOUT("i40e_remove_pd_bp: bad idx\n"); goto exit; } sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) { ret_code = I40E_ERR_INVALID_SD_TYPE; DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n"); goto exit; } /* get the entry and decrease its ref counter */ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; I40E_DEC_BP_REFCNT(&pd_entry->bp); if (pd_entry->bp.ref_cnt) goto exit; /* mark the entry invalid */ pd_entry->valid = FALSE; I40E_DEC_PD_REFCNT(pd_table); pd_addr = (u64 *)pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM); I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); /* free memory here */ if (!pd_entry->rsrc_pg) ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); if (I40E_SUCCESS != ret_code) goto exit; if (!pd_table->ref_cnt) i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); exit: return ret_code; } /** * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index **/ enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, u32 idx) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_hmc_sd_entry *sd_entry; /* get the entry and decrease its ref counter */ sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_DEC_BP_REFCNT(&sd_entry->u.bp); if (sd_entry->u.bp.ref_cnt) { ret_code = I40E_ERR_NOT_READY; goto exit; } I40E_DEC_SD_REFCNT(&hmc_info->sd_table); /* mark the entry invalid */ sd_entry->valid = FALSE; exit: return ret_code; } /** * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor * @hw: pointer to our hw struct * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index * @is_pf: used to distinguish between VF and PF **/ enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx, bool is_pf) { struct i40e_hmc_sd_entry *sd_entry; if (!is_pf) return I40E_NOT_SUPPORTED; /* get the entry and decrease its ref counter */ sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); } /** * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry. * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor **/ enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, u32 idx) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_hmc_sd_entry *sd_entry; sd_entry = &hmc_info->sd_table.sd_entry[idx]; if (sd_entry->u.pd_table.ref_cnt) { ret_code = I40E_ERR_NOT_READY; goto exit; } /* mark the entry invalid */ sd_entry->valid = FALSE; I40E_DEC_SD_REFCNT(&hmc_info->sd_table); exit: return ret_code; } /** * i40e_remove_pd_page_new - Removes a PD page from sd entry. 
* @hw: pointer to our hw struct * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor * @is_pf: used to distinguish between VF and PF **/ enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx, bool is_pf) { struct i40e_hmc_sd_entry *sd_entry; if (!is_pf) return I40E_NOT_SUPPORTED; sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); } Index: releng/11.3/sys/dev/ixl/i40e_hmc.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_hmc.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_hmc.h (revision 349181) @@ -1,246 +1,246 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_HMC_H_ #define _I40E_HMC_H_ #define I40E_HMC_MAX_BP_COUNT 512 /* forward-declare the HW struct for the compiler */ struct i40e_hw; #define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ #define I40E_HMC_PD_CNT_IN_SD 512 #define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ #define I40E_HMC_PAGED_BP_SIZE 4096 #define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 #define I40E_FIRST_VF_FPM_ID 16 struct i40e_hmc_obj_info { u64 base; /* base addr in FPM */ u32 max_cnt; /* max count available for this hmc func */ u32 cnt; /* count of objects driver actually wants to create */ u64 size; /* size in bytes of one object */ }; enum i40e_sd_entry_type { I40E_SD_TYPE_INVALID = 0, I40E_SD_TYPE_PAGED = 1, I40E_SD_TYPE_DIRECT = 2 }; struct i40e_hmc_bp { enum i40e_sd_entry_type entry_type; struct i40e_dma_mem addr; /* populate to be used by hw */ u32 sd_pd_index; u32 ref_cnt; }; struct i40e_hmc_pd_entry { struct i40e_hmc_bp bp; u32 sd_index; bool rsrc_pg; bool valid; }; struct i40e_hmc_pd_table { struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ u32 ref_cnt; u32 sd_index; }; struct i40e_hmc_sd_entry { enum i40e_sd_entry_type entry_type; bool valid; union { struct i40e_hmc_pd_table pd_table; struct i40e_hmc_bp bp; } u; }; struct i40e_hmc_sd_table { struct i40e_virt_mem addr; /* used to track sd_entry allocations */ u32 sd_cnt; u32 ref_cnt; struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ }; struct i40e_hmc_info { u32 signature; /* equals to pci func num for PF and dynamically allocated for VFs */ u8 hmc_fn_id; u16 first_sd_index; /* index of the first available SD */ /* hmc objects */ struct i40e_hmc_obj_info *hmc_obj; struct i40e_virt_mem hmc_obj_virt_mem; struct i40e_hmc_sd_table sd_table; }; #define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) #define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) #define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) #define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) #define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) #define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) /** * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware * @hw: pointer to our hw struct * @pa: pointer to physical address * @sd_index: segment descriptor index * @type: if sd entry is direct or paged **/ #define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ { \ u32 val1, val2, val3; \ val1 = (u32)(I40E_HI_DWORD(pa)); \ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ } /** * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware * @hw: pointer to our hw struct * @sd_index: segment descriptor index * @type: if sd entry is direct or paged **/ #define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ { \ u32 val2, val3; \ val2 = (I40E_HMC_MAX_BP_COUNT << \ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ } /** * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware * @hw: pointer to our hw struct * @sd_idx: segment descriptor index * @pd_idx: page descriptor index **/ #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ wr32((hw), I40E_PFHMC_PDINV, \ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) /** * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit * @hmc_info: pointer to the HMC configuration information structure * @type: type of HMC resources we're searching * @index: starting index for the object * @cnt: number of objects we're trying to create * @sd_idx: pointer to return index of the segment descriptor in question * @sd_limit: pointer to return the maximum number of segment descriptors * * This function calculates the segment descriptor index and index limit * for the resource defined by i40e_hmc_rsrc_type. **/ #define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ { \ u64 fpm_addr, fpm_limit; \ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ (hmc_info)->hmc_obj[(type)].size * (index); \ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ /* add one more to the limit to correct our range */ \ *(sd_limit) += 1; \ } /** * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit * @hmc_info: pointer to the HMC configuration information struct * @type: HMC resource type we're examining * @idx: starting index for the object * @cnt: number of objects we're trying to create * @pd_index: pointer to return page descriptor index * @pd_limit: pointer to return page descriptor index limit * * Calculates the page descriptor index and index limit for the resource * defined by i40e_hmc_rsrc_type. 
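 * Worked example (illustrative values): with 4KB pages
 * (I40E_HMC_PAGED_BP_SIZE), an object at FPM address 0x3000 with
 * size = 0x1000 and cnt = 2 gives pd_index = 0x3000 / 0x1000 = 3 and
 * pd_limit = (0x5000 - 1) / 0x1000 + 1 = 5, i.e. the range covers
 * PDs 3 and 4.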
**/ #define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ { \ u64 fpm_adr, fpm_limit; \ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ (hmc_info)->hmc_obj[(type)].size * (idx); \ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ /* add one more to the limit to correct our range */ \ *(pd_limit) += 1; \ } enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 sd_index, enum i40e_sd_entry_type type, u64 direct_mode_sz); enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 pd_index, struct i40e_dma_mem *rsrc_pg); enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx); enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, u32 idx); enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx, bool is_pf); enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, u32 idx); enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx, bool is_pf); #endif /* _I40E_HMC_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_lan_hmc.c =================================================================== --- releng/11.3/sys/dev/ixl/i40e_lan_hmc.c (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_lan_hmc.c (revision 349181) @@ -1,1407 +1,1412 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_type.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" #include "i40e_prototype.h" /* lan specific interface functions */ /** * i40e_align_l2obj_base - aligns base object pointer to 512 bytes * @offset: base address offset needing alignment * * Aligns the layer 2 function private memory so it's 512-byte aligned. **/ static u64 i40e_align_l2obj_base(u64 offset) { u64 aligned_offset = offset; if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0) aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT - (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT)); return aligned_offset; } /** * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size * @txq_num: number of Tx queues needing backing context * @rxq_num: number of Rx queues needing backing context * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context * @fcoe_filt_num: number of FCoE filters needing backing context * * Calculates the maximum amount of memory required by the function, based * on the number of resources it must provide context for. **/ u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, u32 fcoe_cntx_num, u32 fcoe_filt_num) { u64 fpm_size = 0; fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ; fpm_size = i40e_align_l2obj_base(fpm_size); fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ); fpm_size = i40e_align_l2obj_base(fpm_size); fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX); fpm_size = i40e_align_l2obj_base(fpm_size); fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT); fpm_size = i40e_align_l2obj_base(fpm_size); return fpm_size; } /** * i40e_init_lan_hmc - initialize i40e_hmc_info struct * @hw: pointer to the HW structure * @txq_num: number of Tx queues needing backing context * @rxq_num: number of Rx queues needing backing context * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context * @fcoe_filt_num: number of FCoE filters needing backing context * * This function will be called once per physical function initialization. * It will fill out the i40e_hmc_obj_info structure for LAN objects based on * the driver's provided input, as well as information from the HMC itself * loaded from NVRAM. * * Assumptions: * - HMC Resource Profile has been selected before calling this function.
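 * Sizing sketch (illustrative numbers, not read from hardware): with
 * txq_num = 1024, rxq_num = 1024 and no FCoE objects,
 * i40e_calculate_l2fpm_size() yields 1024 * 128 + 1024 * 32 = 163840
 * bytes (each region is already 512-byte aligned), so the SD count
 * computed below is (163840 + 2M - 1) / 2M = 1 segment descriptor.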
**/ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, u32 rxq_num, u32 fcoe_cntx_num, u32 fcoe_filt_num) { struct i40e_hmc_obj_info *obj, *full_obj; enum i40e_status_code ret_code = I40E_SUCCESS; u64 l2fpm_size; u32 size_exp; hw->hmc.signature = I40E_HMC_INFO_SIGNATURE; hw->hmc.hmc_fn_id = hw->pf_id; /* allocate memory for hmc_obj */ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem, sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX); if (ret_code) goto init_lan_hmc_out; hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *) hw->hmc.hmc_obj_virt_mem.va; /* The full object will be used to create the LAN HMC SD */ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL]; full_obj->max_cnt = 0; full_obj->cnt = 0; full_obj->base = 0; full_obj->size = 0; /* Tx queue context information */ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); obj->cnt = txq_num; obj->base = 0; size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (txq_num > obj->max_cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", txq_num, obj->max_cnt, ret_code); - goto init_lan_hmc_out; + goto free_hmc_out; } /* aggregate values into the full LAN object for later */ full_obj->max_cnt += obj->max_cnt; full_obj->cnt += obj->cnt; /* Rx queue context information */ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); obj->cnt = rxq_num; obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base + (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt * hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (rxq_num > obj->max_cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", rxq_num, obj->max_cnt, ret_code); - goto init_lan_hmc_out; + goto free_hmc_out; } /* aggregate values into the full LAN object for later */ full_obj->max_cnt += obj->max_cnt; full_obj->cnt += obj->cnt; /* FCoE context information */ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX); obj->cnt = fcoe_cntx_num; obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base + (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt * hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_cntx_num > obj->max_cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", fcoe_cntx_num, obj->max_cnt, ret_code); - goto init_lan_hmc_out; + goto free_hmc_out; } /* aggregate values into the full LAN object for later */ full_obj->max_cnt += obj->max_cnt; full_obj->cnt += obj->cnt; /* FCoE filter information */ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX); obj->cnt = fcoe_filt_num; obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base + (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt * hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); obj->size = 
BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_filt_num > obj->max_cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n", fcoe_filt_num, obj->max_cnt, ret_code); - goto init_lan_hmc_out; + goto free_hmc_out; } /* aggregate values into the full LAN object for later */ full_obj->max_cnt += obj->max_cnt; full_obj->cnt += obj->cnt; hw->hmc.first_sd_index = 0; hw->hmc.sd_table.ref_cnt = 0; l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num, fcoe_filt_num); if (NULL == hw->hmc.sd_table.sd_entry) { hw->hmc.sd_table.sd_cnt = (u32) (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) / I40E_HMC_DIRECT_BP_SIZE; /* allocate the sd_entry members in the sd_table */ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr, (sizeof(struct i40e_hmc_sd_entry) * hw->hmc.sd_table.sd_cnt)); if (ret_code) - goto init_lan_hmc_out; + goto free_hmc_out; hw->hmc.sd_table.sd_entry = (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va; } /* store in the LAN full object for later */ full_obj->size = l2fpm_size; init_lan_hmc_out: + return ret_code; +free_hmc_out: + if (hw->hmc.hmc_obj_virt_mem.va) + i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); + return ret_code; } /** * i40e_remove_pd_page - Remove a page from the page descriptor table * @hw: pointer to the HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor * * This function: * 1. Marks the entry in pd table (for paged address mode) invalid * 2. Writes to register PMPDINV to invalidate the backing page in FV cache * 3. Decrements the ref count for pd_entry * assumptions: * 1. caller can deallocate the memory used by pd after this function * returns. **/ static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { enum i40e_status_code ret_code = I40E_SUCCESS; if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS) ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE); return ret_code; } /** * i40e_remove_sd_bp - remove a backing page from a segment descriptor * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index * * This function: * 1. Marks the entry in sd table (for direct address mode) invalid * 2. Writes to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set * to 0) and PMSDDATAHIGH to invalidate the sd page * 3. Decrements the ref count for the sd_entry * assumptions: * 1. caller can deallocate the memory used by backing storage after this * function returns. **/ static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { enum i40e_status_code ret_code = I40E_SUCCESS; if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS) ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE); return ret_code; } /** * i40e_create_lan_hmc_object - allocate backing store for hmc objects * @hw: pointer to the HW structure * @info: pointer to i40e_hmc_lan_create_obj_info struct * * This will allocate memory for PDs and backing pages and populate * the sd and pd entries.
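 * Call sketch (values mirror i40e_configure_lan_hmc() below; other
 * resource types and counts are equally valid):
 *   struct i40e_hmc_lan_create_obj_info info;
 *   info.hmc_info = &hw->hmc;
 *   info.rsrc_type = I40E_HMC_LAN_FULL;
 *   info.start_idx = 0;
 *   info.count = 1;
 *   info.entry_type = I40E_SD_TYPE_DIRECT;
 *   info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
 *   ret_code = i40e_create_lan_hmc_object(hw, &info);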
**/ enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_create_obj_info *info) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_hmc_sd_entry *sd_entry; u32 pd_idx1 = 0, pd_lmt1 = 0; u32 pd_idx = 0, pd_lmt = 0; bool pd_error = FALSE; u32 sd_idx, sd_lmt; u64 sd_size; u32 i, j; if (NULL == info) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n"); goto exit; } if (NULL == info->hmc_info) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n"); goto exit; } if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n", ret_code); goto exit; } if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n", ret_code); goto exit; } /* find sd index and limit */ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { ret_code = I40E_ERR_INVALID_SD_INDEX; goto exit; } /* find pd index */ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &pd_idx, &pd_lmt); /* This is to cover for cases where you may not want to have an SD with * the full 2M memory but something smaller. By not filling out any * size, the function will default the SD size to be 2M. */ if (info->direct_mode_sz == 0) sd_size = I40E_HMC_DIRECT_BP_SIZE; else sd_size = info->direct_mode_sz; /* check if all the sds are valid. If not, allocate a page and * initialize it. */ for (j = sd_idx; j < sd_lmt; j++) { /* update the sd table entry */ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j, info->entry_type, sd_size); if (I40E_SUCCESS != ret_code) goto exit_sd_error; sd_entry = &info->hmc_info->sd_table.sd_entry[j]; if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { /* check if all the pds in this sd are valid. If not, * allocate a page and initialize it. 
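		 * Each paged SD spans I40E_HMC_MAX_BP_COUNT (512) PDs, so
		 * the global [pd_idx, pd_lmt) range is clamped below to
		 * this SD's window, [j * 512, (j + 1) * 512).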
*/ /* find pd_idx and pd_lmt in this sd */ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT)); pd_lmt1 = min(pd_lmt, ((j + 1) * I40E_HMC_MAX_BP_COUNT)); for (i = pd_idx1; i < pd_lmt1; i++) { /* update the pd table entry */ ret_code = i40e_add_pd_table_entry(hw, info->hmc_info, i, NULL); if (I40E_SUCCESS != ret_code) { pd_error = TRUE; break; } } if (pd_error) { /* remove the backing pages from pd_idx1 to i */ while (i && (i > pd_idx1)) { i40e_remove_pd_bp(hw, info->hmc_info, (i - 1)); i--; } } } if (!sd_entry->valid) { sd_entry->valid = TRUE; switch (sd_entry->entry_type) { case I40E_SD_TYPE_PAGED: I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.pd_table.pd_page_addr.pa, j, sd_entry->entry_type); break; case I40E_SD_TYPE_DIRECT: I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa, j, sd_entry->entry_type); break; default: ret_code = I40E_ERR_INVALID_SD_TYPE; goto exit; } } } goto exit; exit_sd_error: /* cleanup for sd entries from j to sd_idx */ while (j && (j > sd_idx)) { sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; switch (sd_entry->entry_type) { case I40E_SD_TYPE_PAGED: pd_idx1 = max(pd_idx, ((j - 1) * I40E_HMC_MAX_BP_COUNT)); pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); for (i = pd_idx1; i < pd_lmt1; i++) i40e_remove_pd_bp(hw, info->hmc_info, i); i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); break; case I40E_SD_TYPE_DIRECT: i40e_remove_sd_bp(hw, info->hmc_info, (j - 1)); break; default: ret_code = I40E_ERR_INVALID_SD_TYPE; break; } j--; } exit: return ret_code; } /** * i40e_configure_lan_hmc - prepare the HMC backing store * @hw: pointer to the hw structure * @model: the model for the layout of the SD/PD tables * * - This function will be called once per physical function initialization. * - This function will be called after i40e_init_lan_hmc() and before * any LAN/FCoE HMC objects can be created. 
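 * Typical PF bring-up order (sketch; the zero FCoE counts are an
 * assumption, not the only valid configuration):
 *   i40e_init_lan_hmc(hw, txq_num, rxq_num, 0, 0);
 *   i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
 *   ... i40e_set_lan_tx_queue_context() / i40e_set_lan_rx_queue_context() ...
 *   i40e_shutdown_lan_hmc(hw);  (on detach)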
**/ enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw, enum i40e_hmc_model model) { struct i40e_hmc_lan_create_obj_info info; u8 hmc_fn_id = hw->hmc.hmc_fn_id; struct i40e_hmc_obj_info *obj; enum i40e_status_code ret_code = I40E_SUCCESS; /* Initialize part of the create object info struct */ info.hmc_info = &hw->hmc; info.rsrc_type = I40E_HMC_LAN_FULL; info.start_idx = 0; info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size; /* Build the SD entry for the LAN objects */ switch (model) { case I40E_HMC_MODEL_DIRECT_PREFERRED: case I40E_HMC_MODEL_DIRECT_ONLY: info.entry_type = I40E_SD_TYPE_DIRECT; /* Make one big object, a single SD */ info.count = 1; ret_code = i40e_create_lan_hmc_object(hw, &info); if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) goto try_type_paged; else if (ret_code != I40E_SUCCESS) goto configure_lan_hmc_out; /* else clause falls through the break */ break; case I40E_HMC_MODEL_PAGED_ONLY: try_type_paged: info.entry_type = I40E_SD_TYPE_PAGED; /* Make one big object in the PD table */ info.count = 1; ret_code = i40e_create_lan_hmc_object(hw, &info); if (ret_code != I40E_SUCCESS) goto configure_lan_hmc_out; break; default: /* unsupported type */ ret_code = I40E_ERR_INVALID_SD_TYPE; DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n", ret_code); goto configure_lan_hmc_out; } /* Configure and program the FPM registers so objects can be created */ /* Tx contexts */ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id), (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512)); wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt); /* Rx contexts */ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id), (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512)); wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt); /* FCoE contexts */ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id), (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512)); wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt); /* FCoE filters */ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id), (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512)); wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt); configure_lan_hmc_out: return ret_code; } /** * i40e_delete_lan_hmc_object - remove hmc objects * @hw: pointer to the HW structure * @info: pointer to i40e_hmc_lan_delete_obj_info struct * * This will de-populate the SDs and PDs. It frees * the memory for PDs and backing storage. After this function returns, * the caller should deallocate memory allocated previously for * book-keeping information about PDs and backing storage.
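 * Note: the sd_table and hmc_obj book-keeping memory itself is freed
 * separately, by i40e_shutdown_lan_hmc().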
**/ enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_delete_obj_info *info) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_hmc_pd_table *pd_table; u32 pd_idx, pd_lmt, rel_pd_idx; u32 sd_idx, sd_lmt; u32 i, j; if (NULL == info) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n"); goto exit; } if (NULL == info->hmc_info) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n"); goto exit; } if (NULL == info->hmc_info->sd_table.sd_entry) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n"); goto exit; } if (NULL == info->hmc_info->hmc_obj) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n"); goto exit; } if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n", ret_code); goto exit; } if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n", ret_code); goto exit; } I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &pd_idx, &pd_lmt); for (j = pd_idx; j < pd_lmt; j++) { sd_idx = j / I40E_HMC_PD_CNT_IN_SD; if (I40E_SD_TYPE_PAGED != info->hmc_info->sd_table.sd_entry[sd_idx].entry_type) continue; rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD; pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; if (pd_table->pd_entry[rel_pd_idx].valid) { ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j); if (I40E_SUCCESS != ret_code) goto exit; } } /* find sd index and limit */ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { ret_code = I40E_ERR_INVALID_SD_INDEX; goto exit; } for (i = sd_idx; i < sd_lmt; i++) { if (!info->hmc_info->sd_table.sd_entry[i].valid) continue; switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { case I40E_SD_TYPE_DIRECT: ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i); if (I40E_SUCCESS != ret_code) goto exit; break; case I40E_SD_TYPE_PAGED: ret_code = i40e_remove_pd_page(hw, info->hmc_info, i); if (I40E_SUCCESS != ret_code) goto exit; break; default: break; } } exit: return ret_code; } /** * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory * @hw: pointer to the hw structure * * This must be called by drivers as they are shutting down and being * removed from the OS. 
**/ enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw) { struct i40e_hmc_lan_delete_obj_info info; enum i40e_status_code ret_code; info.hmc_info = &hw->hmc; info.rsrc_type = I40E_HMC_LAN_FULL; info.start_idx = 0; info.count = 1; /* delete the object */ ret_code = i40e_delete_lan_hmc_object(hw, &info); /* free the SD table entry for LAN */ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr); hw->hmc.sd_table.sd_cnt = 0; hw->hmc.sd_table.sd_entry = NULL; /* free memory used for hmc_obj */ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); hw->hmc.hmc_obj = NULL; return ret_code; } #define I40E_HMC_STORE(_struct, _ele) \ offsetof(struct _struct, _ele), \ FIELD_SIZEOF(struct _struct, _ele) struct i40e_context_ele { u16 offset; u16 size_of; u16 width; u16 lsb; }; /* LAN Tx Queue Context */ static struct i40e_context_ele i40e_hmc_txq_ce_info[] = { /* Field Width LSB */ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 }, /* line 1 */ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 }, /* line 7 */ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) }, {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) }, {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) }, { 0 } }; /* LAN Rx Queue Context */ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = { /* Field Width LSB */ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 }, { 0 } }; /** * i40e_write_byte - replace HMC context byte * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be read from * @src: the struct to be read from **/ static void i40e_write_byte(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *src) { u8 src_byte, dest_byte, mask; 
u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = (u8)(BIT(ce_info->width) - 1); src_byte = *from; src_byte &= mask; /* shift to correct alignment */ mask <<= shift_width; src_byte <<= shift_width; /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA); dest_byte &= ~mask; /* get the bits not changing */ dest_byte |= src_byte; /* add in the new bits */ /* put it all back */ i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA); } /** * i40e_write_word - replace HMC context word * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be read from * @src: the struct to be read from **/ static void i40e_write_word(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *src) { u16 src_word, mask; u8 *from, *dest; u16 shift_width; __le16 dest_word; /* copy from the next struct field */ from = src + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = BIT(ce_info->width) - 1; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_word = *(u16 *)from; src_word &= mask; /* shift to correct alignment */ mask <<= shift_width; src_word <<= shift_width; /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA); dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ /* put it all back */ i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA); } /** * i40e_write_dword - replace HMC context dword * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be read from * @src: the struct to be read from **/ static void i40e_write_dword(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *src) { u32 src_dword, mask; u8 *from, *dest; u16 shift_width; __le32 dest_dword; /* copy from the next struct field */ from = src + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 32 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) mask = BIT(ce_info->width) - 1; else mask = ~(u32)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_dword = *(u32 *)from; src_dword &= mask; /* shift to correct alignment */ mask <<= shift_width; src_dword <<= shift_width; /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA); dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ /* put it all back */ i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA); } /** * i40e_write_qword - replace HMC context qword * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be read from * @src: the struct to be read from **/ static void i40e_write_qword(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *src) { u64 src_qword, mask; u8 *from, *dest; u16 shift_width; __le64 
dest_qword; /* copy from the next struct field */ from = src + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 64 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) mask = BIT_ULL(ce_info->width) - 1; else mask = ~(u64)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_qword = *(u64 *)from; src_qword &= mask; /* shift to correct alignment */ mask <<= shift_width; src_qword <<= shift_width; /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA); dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ /* put it all back */ i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA); } /** * i40e_read_byte - read HMC context byte into struct * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be filled * @dest: the struct to be filled **/ static void i40e_read_byte(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *dest) { u8 dest_byte, mask; u8 *src, *target; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = (u8)(BIT(ce_info->width) - 1); /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = hmc_bits + (ce_info->lsb / 8); i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA); dest_byte &= ~(mask); dest_byte >>= shift_width; /* get the address from the struct field */ target = dest + ce_info->offset; /* put it back in the struct */ i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA); } /** * i40e_read_word - read HMC context word into struct * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be filled * @dest: the struct to be filled **/ static void i40e_read_word(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *dest) { u16 dest_word, mask; u8 *src, *target; u16 shift_width; __le16 src_word; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = BIT(ce_info->width) - 1; /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = hmc_bits + (ce_info->lsb / 8); i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_word &= ~(CPU_TO_LE16(mask)); /* get the data back into host order before shifting */ dest_word = LE16_TO_CPU(src_word); dest_word >>= shift_width; /* get the address from the struct field */ target = dest + ce_info->offset; /* put it back in the struct */ i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA); } /** * i40e_read_dword - read HMC context dword into struct * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be filled * @dest: the struct to be filled **/ static void i40e_read_dword(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *dest) { u32 dest_dword, mask; u8 *src, *target; u16 shift_width; __le32 src_dword; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 32 on an x86 machine, then the shift * operation will not work because the SHL instructions count is 
masked * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) mask = BIT(ce_info->width) - 1; else mask = ~(u32)0; /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = hmc_bits + (ce_info->lsb / 8); i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_dword &= ~(CPU_TO_LE32(mask)); /* get the data back into host order before shifting */ dest_dword = LE32_TO_CPU(src_dword); dest_dword >>= shift_width; /* get the address from the struct field */ target = dest + ce_info->offset; /* put it back in the struct */ i40e_memcpy(target, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA); } /** * i40e_read_qword - read HMC context qword into struct * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be filled * @dest: the struct to be filled **/ static void i40e_read_qword(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *dest) { u64 dest_qword, mask; u8 *src, *target; u16 shift_width; __le64 src_qword; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 64 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) mask = BIT_ULL(ce_info->width) - 1; else mask = ~(u64)0; /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = hmc_bits + (ce_info->lsb / 8); i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_qword &= ~(CPU_TO_LE64(mask)); /* get the data back into host order before shifting */ dest_qword = LE64_TO_CPU(src_qword); dest_qword >>= shift_width; /* get the address from the struct field */ target = dest + ce_info->offset; /* put it back in the struct */ i40e_memcpy(target, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA); } /** * i40e_get_hmc_context - extract HMC context bits * @context_bytes: pointer to the context bit array * @ce_info: a description of the struct to be filled * @dest: the struct to be filled **/ static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes, struct i40e_context_ele *ce_info, u8 *dest) { int f; for (f = 0; ce_info[f].width != 0; f++) { switch (ce_info[f].size_of) { case 1: i40e_read_byte(context_bytes, &ce_info[f], dest); break; case 2: i40e_read_word(context_bytes, &ce_info[f], dest); break; case 4: i40e_read_dword(context_bytes, &ce_info[f], dest); break; case 8: i40e_read_qword(context_bytes, &ce_info[f], dest); break; default: /* nothing to do, just keep going */ break; } } return I40E_SUCCESS; } /** * i40e_clear_hmc_context - zero out the HMC context bits * @hw: the hardware struct * @context_bytes: pointer to the context bit array (DMA memory) * @hmc_type: the type of HMC resource **/ static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw, u8 *context_bytes, enum i40e_hmc_lan_rsrc_type hmc_type) { /* clean the bit array */ i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size, I40E_DMA_MEM); return I40E_SUCCESS; } /** * i40e_set_hmc_context - replace HMC context bits * @context_bytes: pointer to the context bit array * @ce_info: a description of the struct to be filled * @dest: the struct to be filled **/ static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes, struct i40e_context_ele 
*ce_info, u8 *dest) { int f; for (f = 0; ce_info[f].width != 0; f++) { /* we have to deal with each element of the HMC using the * correct size so that we are correct regardless of the * endianness of the machine */ switch (ce_info[f].size_of) { case 1: i40e_write_byte(context_bytes, &ce_info[f], dest); break; case 2: i40e_write_word(context_bytes, &ce_info[f], dest); break; case 4: i40e_write_dword(context_bytes, &ce_info[f], dest); break; case 8: i40e_write_qword(context_bytes, &ce_info[f], dest); break; } } return I40E_SUCCESS; } /** * i40e_hmc_get_object_va - retrieves an object's virtual address * @hw: pointer to the hw structure * @object_base: pointer to u64 to get the va * @rsrc_type: the hmc resource type * @obj_idx: hmc object index * * This function retrieves the object's virtual address from the object * base pointer. This function is used for LAN Queue contexts. **/ static enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, enum i40e_hmc_lan_rsrc_type rsrc_type, u32 obj_idx) { u32 obj_offset_in_sd, obj_offset_in_pd; struct i40e_hmc_info *hmc_info = &hw->hmc; struct i40e_hmc_sd_entry *sd_entry; struct i40e_hmc_pd_entry *pd_entry; u32 pd_idx, pd_lmt, rel_pd_idx; enum i40e_status_code ret_code = I40E_SUCCESS; u64 obj_offset_in_fpm; u32 sd_idx, sd_lmt; if (NULL == hmc_info->hmc_obj) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n"); goto exit; } if (NULL == object_base) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n"); goto exit; } if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n", ret_code); goto exit; } /* find sd index and limit */ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, &sd_idx, &sd_lmt); sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base + hmc_info->hmc_obj[rsrc_type].size * obj_idx; if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, &pd_idx, &pd_lmt); rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD; pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx]; obj_offset_in_pd = (u32)(obj_offset_in_fpm % I40E_HMC_PAGED_BP_SIZE); *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd; } else { obj_offset_in_sd = (u32)(obj_offset_in_fpm % I40E_HMC_DIRECT_BP_SIZE); *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd; } exit: return ret_code; } /** * i40e_get_lan_tx_queue_context - return the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about * @s: the struct to be filled **/ enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_txq *s) { enum i40e_status_code err; u8 *context_bytes; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; return i40e_get_hmc_context(context_bytes, i40e_hmc_txq_ce_info, (u8 *)s); } /** * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about **/ enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, u16 queue) { enum i40e_status_code err; u8 *context_bytes; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
if (err < 0) return err; return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX); } /** * i40e_set_lan_tx_queue_context - set the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about * @s: the struct to be filled **/ enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_txq *s) { enum i40e_status_code err; u8 *context_bytes; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; return i40e_set_hmc_context(context_bytes, i40e_hmc_txq_ce_info, (u8 *)s); } /** * i40e_get_lan_rx_queue_context - return the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about * @s: the struct to be filled **/ enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_rxq *s) { enum i40e_status_code err; u8 *context_bytes; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; return i40e_get_hmc_context(context_bytes, i40e_hmc_rxq_ce_info, (u8 *)s); } /** * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about **/ enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, u16 queue) { enum i40e_status_code err; u8 *context_bytes; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX); } /** * i40e_set_lan_rx_queue_context - set the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about * @s: the struct to be filled **/ enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_rxq *s) { enum i40e_status_code err; u8 *context_bytes; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; return i40e_set_hmc_context(context_bytes, i40e_hmc_rxq_ce_info, (u8 *)s); } Index: releng/11.3/sys/dev/ixl/i40e_lan_hmc.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_lan_hmc.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_lan_hmc.h (revision 349181) @@ -1,201 +1,201 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_LAN_HMC_H_ #define _I40E_LAN_HMC_H_ /* forward-declare the HW struct for the compiler */ struct i40e_hw; /* HMC element context information */ /* Rx queue context data * * The sizes of the variables may be larger than needed due to crossing byte * boundaries. If we do not have the width of the variable set to the correct * size then we could end up shifting bits off the top of the variable when the * variable is at the top of a byte and crosses over into the next byte. */ struct i40e_hmc_obj_rxq { u16 head; u16 cpuid; /* bigger than needed, see above for reason */ u64 base; u16 qlen; #define I40E_RXQ_CTX_DBUFF_SHIFT 7 u16 dbuff; /* bigger than needed, see above for reason */ #define I40E_RXQ_CTX_HBUFF_SHIFT 6 u16 hbuff; /* bigger than needed, see above for reason */ u8 dtype; u8 dsize; u8 crcstrip; u8 fc_ena; u8 l2tsel; u8 hsplit_0; u8 hsplit_1; u8 showiv; u32 rxmax; /* bigger than needed, see above for reason */ u8 tphrdesc_ena; u8 tphwdesc_ena; u8 tphdata_ena; u8 tphhead_ena; u16 lrxqthresh; /* bigger than needed, see above for reason */ u8 prefena; /* NOTE: normally must be set to 1 at init */ }; /* Tx queue context data * * The sizes of the variables may be larger than needed due to crossing byte * boundaries. If we do not have the width of the variable set to the correct * size then we could end up shifting bits off the top of the variable when the * variable is at the top of a byte and crosses over into the next byte. 
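 * Example (from the Rx context above): lrxqthresh is only 3 bits wide
 * but sits at context bit 198, so i40e_write_word() shifts it left by
 * 198 % 8 = 6 bits; the shifted value needs 9 bits, which is why the
 * field is declared u16 rather than u8.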
*/ struct i40e_hmc_obj_txq { u16 head; u8 new_context; u64 base; u8 fc_ena; u8 timesync_ena; u8 fd_ena; u8 alt_vlan_ena; u16 thead_wb; u8 cpuid; u8 head_wb_ena; u16 qlen; u8 tphrdesc_ena; u8 tphrpacket_ena; u8 tphwdesc_ena; u64 head_wb_addr; u32 crc; u16 rdylist; u8 rdylist_act; }; /* for hsplit_0 field of Rx HMC context */ enum i40e_hmc_obj_rx_hsplit_0 { I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, }; /* fcoe_cntx and fcoe_filt are for debugging purpose only */ struct i40e_hmc_obj_fcoe_cntx { u32 rsv[32]; }; struct i40e_hmc_obj_fcoe_filt { u32 rsv[8]; }; /* Context sizes for LAN objects */ enum i40e_hmc_lan_object_size { I40E_HMC_LAN_OBJ_SZ_8 = 0x3, I40E_HMC_LAN_OBJ_SZ_16 = 0x4, I40E_HMC_LAN_OBJ_SZ_32 = 0x5, I40E_HMC_LAN_OBJ_SZ_64 = 0x6, I40E_HMC_LAN_OBJ_SZ_128 = 0x7, I40E_HMC_LAN_OBJ_SZ_256 = 0x8, I40E_HMC_LAN_OBJ_SZ_512 = 0x9, }; #define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 #define I40E_HMC_OBJ_SIZE_TXQ 128 #define I40E_HMC_OBJ_SIZE_RXQ 32 #define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64 #define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 enum i40e_hmc_lan_rsrc_type { I40E_HMC_LAN_FULL = 0, I40E_HMC_LAN_TX = 1, I40E_HMC_LAN_RX = 2, I40E_HMC_FCOE_CTX = 3, I40E_HMC_FCOE_FILT = 4, I40E_HMC_LAN_MAX = 5 }; enum i40e_hmc_model { I40E_HMC_MODEL_DIRECT_PREFERRED = 0, I40E_HMC_MODEL_DIRECT_ONLY = 1, I40E_HMC_MODEL_PAGED_ONLY = 2, I40E_HMC_MODEL_UNKNOWN, }; struct i40e_hmc_lan_create_obj_info { struct i40e_hmc_info *hmc_info; u32 rsrc_type; u32 start_idx; u32 count; enum i40e_sd_entry_type entry_type; u64 direct_mode_sz; }; struct i40e_hmc_lan_delete_obj_info { struct i40e_hmc_info *hmc_info; u32 rsrc_type; u32 start_idx; u32 count; }; enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, u32 rxq_num, u32 fcoe_cntx_num, u32 fcoe_filt_num); enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw, enum i40e_hmc_model model); enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw); u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, u32 fcoe_cntx_num, u32 fcoe_filt_num); enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_txq *s); enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, u16 queue); enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_txq *s); enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_rxq *s); enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, u16 queue); enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_rxq *s); enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_create_obj_info *info); enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_delete_obj_info *info); #endif /* _I40E_LAN_HMC_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_nvm.c =================================================================== --- releng/11.3/sys/dev/ixl/i40e_nvm.c (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_nvm.c (revision 349181) @@ -1,1714 +1,1750 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. 
- + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "i40e_prototype.h" /** * i40e_init_nvm - Initialize NVM function pointers * @hw: pointer to the HW structure * * Set up the function pointers and the NVM info structure. Should be called * once per NVM initialization, e.g. inside i40e_init_shared_code(). * Note that the NVM term is used here (and in all methods covered * in this file) as an equivalent of the FLASH part mapped into the SR; * FLASH is always accessed through the Shadow RAM. **/ enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw) { struct i40e_nvm_info *nvm = &hw->nvm; enum i40e_status_code ret_code = I40E_SUCCESS; u32 fla, gens; u8 sr_size; DEBUGFUNC("i40e_init_nvm"); /* The SR size is stored regardless of the nvm programming mode * as the blank mode may be used in the factory line. */ gens = rd32(hw, I40E_GLNVM_GENS); sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> I40E_GLNVM_GENS_SR_SIZE_SHIFT); /* Switching to words (sr_size holds the log2 of the SR size in KB) */ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, I40E_GLNVM_FLA); if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ /* Max NVM timeout */ nvm->timeout = I40E_MAX_NVM_TIMEOUT; nvm->blank_nvm_mode = FALSE; } else { /* Blank programming mode */ nvm->blank_nvm_mode = TRUE; ret_code = I40E_ERR_NVM_BLANK_MODE; i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); } return ret_code; } /** * i40e_acquire_nvm - Generic request for acquiring the NVM ownership * @hw: pointer to the HW structure * @access: NVM access type (read or write) * * This function will request NVM ownership, for reading or writing, * via the proper Admin Command.
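 * Usage sketch (assumed caller pattern; pairs with i40e_release_nvm(),
 * see also __i40e_read_nvm_word() below):
 *   if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
 *           ret_code = __i40e_read_nvm_word(hw, offset, &word);
 *           i40e_release_nvm(hw);
 *   }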
**/ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access) { enum i40e_status_code ret_code = I40E_SUCCESS; u64 gtime, timeout; u64 time_left = 0; DEBUGFUNC("i40e_acquire_nvm"); if (hw->nvm.blank_nvm_mode) goto i40e_acquire_nvm_exit; ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 0, &time_left, NULL); /* Reading the Global Device Timer */ gtime = rd32(hw, I40E_GLVFGEN_TIMER); /* Store the timeout */ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; if (ret_code) i40e_debug(hw, I40E_DEBUG_NVM, "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", access, time_left, ret_code, hw->aq.asq_last_status); if (ret_code && time_left) { /* Poll until the current NVM owner times out */ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; while ((gtime < timeout) && time_left) { i40e_msec_delay(10); gtime = rd32(hw, I40E_GLVFGEN_TIMER); ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 0, &time_left, NULL); if (ret_code == I40E_SUCCESS) { hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; break; } } if (ret_code != I40E_SUCCESS) { hw->nvm.hw_semaphore_timeout = 0; i40e_debug(hw, I40E_DEBUG_NVM, "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", time_left, ret_code, hw->aq.asq_last_status); } } i40e_acquire_nvm_exit: return ret_code; } /** * i40e_release_nvm - Generic request for releasing the NVM ownership * @hw: pointer to the HW structure * * This function will release NVM resource via the proper Admin Command. **/ void i40e_release_nvm(struct i40e_hw *hw) { enum i40e_status_code ret_code = I40E_SUCCESS; u32 total_delay = 0; DEBUGFUNC("i40e_release_nvm"); if (hw->nvm.blank_nvm_mode) return; ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); /* there are some rare cases when trying to release the resource * results in an admin Q timeout, so handle them correctly */ while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && (total_delay < hw->aq.asq_cmd_timeout)) { i40e_msec_delay(1); ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); total_delay++; } } /** * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit * @hw: pointer to the HW structure * * Polls the SRCTL Shadow RAM register done bit. **/ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) { enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; u32 srctl, wait_cnt; DEBUGFUNC("i40e_poll_sr_srctl_done_bit"); /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { srctl = rd32(hw, I40E_GLNVM_SRCTL); if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { ret_code = I40E_SUCCESS; break; } i40e_usec_delay(5); } if (ret_code == I40E_ERR_TIMEOUT) i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); return ret_code; } /** * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
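 * Register handshake, as implemented below: wait for GLNVM_SRCTL.DONE,
 * write (offset << ADDR_SHIFT) | START to GLNVM_SRCTL, wait for DONE
 * again, then extract the 16-bit RDDATA field from GLNVM_SRDATA.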
**/ enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, u16 *data) { enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; DEBUGFUNC("i40e_read_nvm_word_srctl"); if (offset >= hw->nvm.sr_size) { i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: Offset %d beyond Shadow RAM limit %d\n", offset, hw->nvm.sr_size); ret_code = I40E_ERR_PARAM; goto read_nvm_exit; } /* Poll the done bit first */ ret_code = i40e_poll_sr_srctl_done_bit(hw); if (ret_code == I40E_SUCCESS) { /* Write the address and start reading */ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | BIT(I40E_GLNVM_SRCTL_START_SHIFT); wr32(hw, I40E_GLNVM_SRCTL, sr_reg); /* Poll I40E_GLNVM_SRCTL until the done bit is set */ ret_code = i40e_poll_sr_srctl_done_bit(hw); if (ret_code == I40E_SUCCESS) { sr_reg = rd32(hw, I40E_GLNVM_SRDATA); *data = (u16)((sr_reg & I40E_GLNVM_SRDATA_RDDATA_MASK) >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); } } if (ret_code != I40E_SUCCESS) i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", offset); read_nvm_exit: return ret_code; } /** * i40e_read_nvm_aq - Read Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start * @words: number of words to read * @data: buffer to store the words read from the Shadow RAM * @last_command: tells the AdminQ that this is the last command * * Reads a 16 bit words buffer from the Shadow RAM using the admin command. **/ static enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 words, void *data, bool last_command) { enum i40e_status_code ret_code = I40E_ERR_NVM; struct i40e_asq_cmd_details cmd_details; DEBUGFUNC("i40e_read_nvm_aq"); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; /* Here we are checking the SR limit only for the flat memory model. * We cannot do it for the module-based model, as we did not acquire * the NVM resource yet (we cannot get the module pointer value). * Firmware will check the module-based model.
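 * For example (illustrative numbers, assuming the usual 4KB sector,
 * i.e. I40E_SR_SECTOR_SIZE_IN_WORDS == 0x800): a request at
 * offset = 0x7F8 with words = 16 would end at word 0x807, and since
 * (0x7F8 + 15) / 0x800 == 1 while 0x7F8 / 0x800 == 0, the
 * sector-crossing check below rejects it.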
*/ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) /* We can read only up to 4KB (one sector) in one AQ read */ i40e_debug(hw, I40E_DEBUG_NVM, "NVM read fail error: tried to read %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) /* A single read cannot span two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_read_nvm(hw, module_pointer, 2 * offset, /*bytes*/ 2 * words, /*bytes*/ data, last_command, &cmd_details); return ret_code; } /** * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the AdminQ **/ static enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, u16 *data) { enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; DEBUGFUNC("i40e_read_nvm_word_aq"); ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE); *data = LE16_TO_CPU(*(__le16 *)data); return ret_code; } /** * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM. * * Do not use this function except in cases where the nvm lock is already * taken via i40e_acquire_nvm(). **/ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) return i40e_read_nvm_word_aq(hw, offset, data); return i40e_read_nvm_word_srctl(hw, offset, data); } /** * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM. **/ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { enum i40e_status_code ret_code = I40E_SUCCESS; if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) return ret_code; ret_code = __i40e_read_nvm_word(hw, offset, data); if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) i40e_release_nvm(hw); return ret_code; } /** * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR, one word at a time, using * i40e_read_nvm_word_srctl(). No NVM ownership is taken or released here; * any required locking is handled by the caller.
**/ static enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { enum i40e_status_code ret_code = I40E_SUCCESS; u16 index, word; DEBUGFUNC("i40e_read_nvm_buffer_srctl"); /* Loop through the selected region */ for (word = 0; word < *words; word++) { index = offset + word; ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); if (ret_code != I40E_SUCCESS) break; } /* Update the number of words read from the Shadow RAM */ *words = word; return ret_code; } /** * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using i40e_read_nvm_aq(). * NVM ownership must already be held; the public wrapper * i40e_read_nvm_buffer() takes and releases it. **/ static enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { enum i40e_status_code ret_code; u16 read_size = *words; bool last_cmd = FALSE; u16 words_read = 0; u16 i = 0; DEBUGFUNC("i40e_read_nvm_buffer_aq"); do { /* Calculate the number of words we should read in this step. * The FVL AQ does not allow reading more than one page at a * time or crossing page boundaries. */ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS) read_size = min(*words, (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS - (offset % I40E_SR_SECTOR_SIZE_IN_WORDS))); else read_size = min((*words - words_read), I40E_SR_SECTOR_SIZE_IN_WORDS); /* Check if this is the last command; if so, set the proper flag */ if ((words_read + read_size) >= *words) last_cmd = TRUE; ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, data + words_read, last_cmd); if (ret_code != I40E_SUCCESS) goto read_nvm_buffer_aq_exit; /* Increment counter for words already read and move offset to * new read location */ words_read += read_size; offset += read_size; } while (words_read < *words); for (i = 0; i < *words; i++) data[i] = LE16_TO_CPU(((__le16 *)data)[i]); read_nvm_buffer_aq_exit: *words = words_read; return ret_code; } /** * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using either the AQ or the * SRCTL access method; when the AQ method is in use the caller must already * hold the NVM lock (see i40e_acquire_nvm()). **/ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) return i40e_read_nvm_buffer_aq(hw, offset, words, data); return i40e_read_nvm_buffer_srctl(hw, offset, words, data); } /** * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using the appropriate access * method. For the AQ method, the buffer read is preceded by the NVM * ownership take and followed by the release.
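 *
 * Usage sketch (illustrative only): @words is in/out, so callers can
 * detect short reads:
 *
 *	u16 words = 8;
 *	u16 buf[8];
 *
 *	if (i40e_read_nvm_buffer(hw, 0, &words, buf) != I40E_SUCCESS)
 *		DEBUGOUT1("SR buffer read failed after %d words", words);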
**/ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { enum i40e_status_code ret_code = I40E_SUCCESS; if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (!ret_code) { ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data); i40e_release_nvm(hw); } } else { ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); } return ret_code; } /** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start * @words: number of words to write * @data: buffer with words to write to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. **/ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 words, void *data, bool last_command) { enum i40e_status_code ret_code = I40E_ERR_NVM; struct i40e_asq_cmd_details cmd_details; DEBUGFUNC("i40e_write_nvm_aq"); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; /* Here we are checking the SR limit only for the flat memory model. * We cannot do it for the module-based model, as we did not acquire * the NVM resource yet (we cannot get the module pointer value). * Firmware will check the module-based model. */ if ((offset + words) > hw->nvm.sr_size) DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n"); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) /* We can write only up to 4KB (one sector), in one AQ write */ DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n"); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) /* A single write cannot spread over two sectors */ DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n"); else ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ 2 * words, /*bytes*/ data, last_command, 0, &cmd_details); return ret_code; } /** * __i40e_write_nvm_word - Writes Shadow RAM word * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to write * @data: word to write to the Shadow RAM * * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method. * NVM ownership has to be acquired and released (on ARQ completion event * reception) by the caller. To commit the SR to the NVM, the update * checksum function should be called. **/ enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset, void *data) { DEBUGFUNC("__i40e_write_nvm_word"); *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data)); /* Value 0x00 below means that we treat SR as a flat mem */ return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE); } /** * __i40e_write_nvm_buffer - Writes Shadow RAM buffer * @hw: pointer to the HW structure * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset of the Shadow RAM buffer to write * @words: number of words to write * @data: words to write to the Shadow RAM * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. * NVM ownership must be acquired before calling this function and released * on ARQ completion event reception by the caller. To commit the SR to the * NVM, the update checksum function should be called.
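 *
 * A minimal flow sketch (illustrative only; module, offset, words and
 * data are hypothetical caller variables, and the real update path
 * defers the release until the ARQ completion event arrives):
 *
 *	status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 *	if (status == I40E_SUCCESS) {
 *		status = __i40e_write_nvm_buffer(hw, module, offset,
 *		    words, data);
 *		if (status == I40E_SUCCESS)
 *			status = i40e_update_nvm_checksum(hw);
 *	}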
**/ enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 words, void *data) { __le16 *le_word_ptr = (__le16 *)data; u16 *word_ptr = (u16 *)data; u32 i = 0; DEBUGFUNC("__i40e_write_nvm_buffer"); for (i = 0; i < words; i++) le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]); /* Here we will only write one buffer as the size of the modules * mirrored in the Shadow RAM is always less than 4KB. */ return i40e_write_nvm_aq(hw, module_pointer, offset, words, data, FALSE); } /** * i40e_calc_nvm_checksum - Calculates and returns the checksum * @hw: pointer to hardware structure * @checksum: pointer to the checksum * * This function calculates the SW checksum that covers the whole 64kB shadow * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size * of the VPD area are customer specific and unknown, so this function skips * the maximum possible VPD size (1kB). **/ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; u16 vpd_module = 0; u16 *data; u16 i = 0; DEBUGFUNC("i40e_calc_nvm_checksum"); ret_code = i40e_allocate_virt_mem(hw, &vmem, I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); if (ret_code) goto i40e_calc_nvm_checksum_exit; data = (u16 *)vmem.va; /* read pointer to VPD area */ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); if (ret_code != I40E_SUCCESS) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } /* read pointer to PCIe Alt Auto-load module */ ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, &pcie_alt_module); if (ret_code != I40E_SUCCESS) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } /* Calculate SW checksum that covers the whole 64kB shadow RAM * except the VPD and PCIe ALT Auto-load modules */ for (i = 0; i < hw->nvm.sr_size; i++) { /* Read SR page */ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); if (ret_code != I40E_SUCCESS) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } } /* Skip Checksum word */ if (i == I40E_SR_SW_CHECKSUM_WORD) continue; /* Skip VPD module (convert byte size to word count) */ if ((i >= (u32)vpd_module) && (i < ((u32)vpd_module + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { continue; } /* Skip PCIe ALT module (convert byte size to word count) */ if ((i >= (u32)pcie_alt_module) && (i < ((u32)pcie_alt_module + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { continue; } checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; } *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; i40e_calc_nvm_checksum_exit: i40e_free_virt_mem(hw, &vmem); return ret_code; } /** * i40e_update_nvm_checksum - Updates the NVM checksum * @hw: pointer to hardware structure * * NVM ownership must be acquired before calling this function and released * on ARQ completion event reception by the caller. * This function will commit the SR to the NVM.
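 *
 * The invariant maintained (see i40e_calc_nvm_checksum() above) is,
 * informally:
 *
 *	SR[I40E_SR_SW_CHECKSUM_WORD] ==
 *	    (u16)(I40E_SR_SW_CHECKSUM_BASE - sum(covered SR words))
 *
 * where "covered" excludes the checksum word itself, the VPD module and
 * the PCIe ALT auto-load module.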
**/ enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw) { enum i40e_status_code ret_code = I40E_SUCCESS; u16 checksum; __le16 le_sum; DEBUGFUNC("i40e_update_nvm_checksum"); ret_code = i40e_calc_nvm_checksum(hw, &checksum); le_sum = CPU_TO_LE16(checksum); if (ret_code == I40E_SUCCESS) ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, 1, &le_sum, TRUE); return ret_code; } /** * i40e_validate_nvm_checksum - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum: calculated checksum * * Performs the checksum calculation and validates the NVM SW checksum. If the * caller does not need the checksum, the pointer can be NULL. **/ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { enum i40e_status_code ret_code = I40E_SUCCESS; u16 checksum_sr = 0; u16 checksum_local = 0; DEBUGFUNC("i40e_validate_nvm_checksum"); /* We must acquire the NVM lock in order to correctly synchronize the * NVM accesses across multiple PFs. Without doing so it is possible * for one of the PFs to read invalid data, potentially indicating that * the checksum is invalid. */ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) return ret_code; ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); i40e_release_nvm(hw); if (ret_code) return ret_code; /* Verify that the checksum read from the EEPROM matches the * calculated checksum */ if (checksum_local != checksum_sr) ret_code = I40E_ERR_NVM_CHECKSUM; /* If the user cares, return the calculated checksum */ if (checksum) *checksum = checksum_local; return ret_code; } static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno); static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno); static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static INLINE u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); } static INLINE u8 i40e_nvmupd_get_transaction(u32 val) { return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val) { return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >> I40E_NVM_PRESERVATION_FLAGS_SHIFT); } static const char *i40e_nvm_update_state_str[] = { "I40E_NVMUPD_INVALID", "I40E_NVMUPD_READ_CON", "I40E_NVMUPD_READ_SNT", "I40E_NVMUPD_READ_LCB", "I40E_NVMUPD_READ_SA", "I40E_NVMUPD_WRITE_ERA", "I40E_NVMUPD_WRITE_CON", "I40E_NVMUPD_WRITE_SNT", "I40E_NVMUPD_WRITE_LCB",
"I40E_NVMUPD_WRITE_SA", "I40E_NVMUPD_CSUM_CON", "I40E_NVMUPD_CSUM_SA", "I40E_NVMUPD_CSUM_LCB", "I40E_NVMUPD_STATUS", "I40E_NVMUPD_EXEC_AQ", "I40E_NVMUPD_GET_AQ_RESULT", "I40E_NVMUPD_GET_AQ_EVENT", + "I40E_NVMUPD_GET_FEATURES", }; /** * i40e_nvmupd_command - Process an NVM update command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * Dispatches command depending on what update state is current **/ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_status_code status; enum i40e_nvmupd_cmd upd_cmd; DEBUGFUNC("i40e_nvmupd_command"); /* assume success */ *perrno = 0; /* early check for status command and debug msgs */ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, hw->nvm_release_on_done, hw->nvm_wait_opcode, cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { *perrno = -EFAULT; i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_validate_command returns %d errno %d\n", upd_cmd, *perrno); } /* a status request returns immediately rather than * going into the state machine */ if (upd_cmd == I40E_NVMUPD_STATUS) { if (!cmd->data_size) { *perrno = -EFAULT; return I40E_ERR_BUF_TOO_SHORT; } bytes[0] = hw->nvmupd_state; if (cmd->data_size >= 4) { bytes[1] = 0; *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; } /* Clear error status on read */ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; return I40E_SUCCESS; } + /* + * A supported features request returns immediately + * rather than going into state machine + */ + if (upd_cmd == I40E_NVMUPD_FEATURES) { + if (cmd->data_size < hw->nvmupd_features.size) { + *perrno = -EFAULT; + return I40E_ERR_BUF_TOO_SHORT; + } + + /* + * If buffer is bigger than i40e_nvmupd_features structure, + * make sure the trailing bytes are set to 0x0. + */ + if (cmd->data_size > hw->nvmupd_features.size) + i40e_memset(bytes + hw->nvmupd_features.size, 0x0, + cmd->data_size - hw->nvmupd_features.size, + I40E_NONDMA_MEM); + + i40e_memcpy(bytes, &hw->nvmupd_features, + hw->nvmupd_features.size, I40E_NONDMA_MEM); + + return I40E_SUCCESS; + } + /* Clear status even it is not read and log */ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) { i40e_debug(hw, I40E_DEBUG_NVM, "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n"); hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } /* Acquire lock to prevent race condition where adminq_task * can execute after i40e_nvmupd_nvm_read/write but before state * variables (nvm_wait_opcode, nvm_release_on_done) are updated. * * During NVMUpdate, it is observed that lock could be held for * ~5ms for most commands. However lock is held for ~60ms for * NVMUPD_CSUM_LCB command. 
*/ i40e_acquire_spinlock(&hw->aq.arq_spinlock); switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT: status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_READING: status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_WRITING: status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_INIT_WAIT: case I40E_NVMUPD_STATE_WRITE_WAIT: /* if we need to stop waiting for an event, clear * the wait info and return before doing anything else */ if (cmd->offset == 0xffff) { i40e_nvmupd_clear_wait_state(hw); status = I40E_SUCCESS; break; } status = I40E_ERR_NOT_READY; *perrno = -EBUSY; break; default: /* invalid state, should never happen */ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: no such state %d\n", hw->nvmupd_state); status = I40E_NOT_SUPPORTED; *perrno = -ESRCH; break; } i40e_release_spinlock(&hw->aq.arq_spinlock); return status; } /** * i40e_nvmupd_state_init - Handle NVM update state Init * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * Process legitimate commands of the Init state and conditionally set next * state. Reject all other commands. **/ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_status_code status = I40E_SUCCESS; enum i40e_nvmupd_cmd upd_cmd; DEBUGFUNC("i40e_nvmupd_state_init"); upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); switch (upd_cmd) { case I40E_NVMUPD_READ_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); } break; case I40E_NVMUPD_READ_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); if (status) i40e_release_nvm(hw); else hw->nvmupd_state = I40E_NVMUPD_STATE_READING; } break; case I40E_NVMUPD_WRITE_ERA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_erase(hw, cmd, perrno); if (status) { i40e_release_nvm(hw); } else { hw->nvm_release_on_done = TRUE; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } break; case I40E_NVMUPD_WRITE_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { i40e_release_nvm(hw); } else { hw->nvm_release_on_done = TRUE; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } break; case I40E_NVMUPD_WRITE_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { i40e_release_nvm(hw); } else { hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } } break; case I40E_NVMUPD_CSUM_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { *perrno = 
hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; i40e_release_nvm(hw); } else { hw->nvm_release_on_done = TRUE; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } break; case I40E_NVMUPD_EXEC_AQ: status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_GET_AQ_RESULT: status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_GET_AQ_EVENT: status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno); break; default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in init state\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_ERR_NVM; *perrno = -ESRCH; break; } return status; } /** * i40e_nvmupd_state_reading - Handle NVM update state Reading * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands. **/ static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_status_code status = I40E_SUCCESS; enum i40e_nvmupd_cmd upd_cmd; DEBUGFUNC("i40e_nvmupd_state_reading"); upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); switch (upd_cmd) { case I40E_NVMUPD_READ_SA: case I40E_NVMUPD_READ_CON: status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_READ_LCB: status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in reading state.\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; *perrno = -ESRCH; break; } return status; } /** * i40e_nvmupd_state_writing - Handle NVM update state Writing * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands **/ static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_status_code status = I40E_SUCCESS; enum i40e_nvmupd_cmd upd_cmd; bool retry_attempt = FALSE; DEBUGFUNC("i40e_nvmupd_state_writing"); upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (!status) { hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; case I40E_NVMUPD_WRITE_LCB: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_release_on_done = TRUE; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; case I40E_NVMUPD_CSUM_CON: /* Assumes the caller has acquired the nvm */ status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? 
i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; case I40E_NVMUPD_CSUM_LCB: /* Assumes the caller has acquired the nvm */ status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_release_on_done = TRUE; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in writing state.\n", i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; *perrno = -ESRCH; break; } /* In some circumstances, a multi-write transaction takes longer * than the default 3 minute timeout on the write semaphore. If * the write failed with an EBUSY status, this is likely the problem, * so here we try to reacquire the semaphore then retry the write. * We only do one retry, then give up. */ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && !retry_attempt) { enum i40e_status_code old_status = status; u32 old_asq_status = hw->aq.asq_last_status; u32 gtime; gtime = rd32(hw, I40E_GLVFGEN_TIMER); if (gtime >= hw->nvm.hw_semaphore_timeout) { i40e_debug(hw, I40E_DEBUG_ALL, "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", gtime, hw->nvm.hw_semaphore_timeout); i40e_release_nvm(hw); status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { i40e_debug(hw, I40E_DEBUG_ALL, "NVMUPD: write semaphore reacquire failed aq_err = %d\n", hw->aq.asq_last_status); status = old_status; hw->aq.asq_last_status = old_asq_status; } else { retry_attempt = TRUE; goto retry; } } } return status; } /** * i40e_nvmupd_clear_wait_state - clear wait state on hw * @hw: pointer to the hardware structure **/ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) { i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: clearing wait on opcode 0x%04x\n", hw->nvm_wait_opcode); if (hw->nvm_release_on_done) { i40e_release_nvm(hw); hw->nvm_release_on_done = FALSE; } hw->nvm_wait_opcode = 0; if (hw->aq.arq_last_status) { hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; return; } switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT_WAIT: hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; case I40E_NVMUPD_STATE_WRITE_WAIT: hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; break; default: break; } } /** * i40e_nvmupd_check_wait_event - handle NVM update operation events * @hw: pointer to the hardware structure * @opcode: the event that just happened * @desc: AdminQ descriptor **/ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, struct i40e_aq_desc *desc) { u32 aq_desc_len = sizeof(struct i40e_aq_desc); if (opcode == hw->nvm_wait_opcode) { i40e_memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len, I40E_NONDMA_TO_NONDMA); i40e_nvmupd_clear_wait_state(hw); } } /** * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @perrno: pointer to return error code * * Return one of the valid command types or I40E_NVMUPD_INVALID **/ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno) { enum i40e_nvmupd_cmd upd_cmd; u8 module, transaction; DEBUGFUNC("i40e_nvmupd_validate_command"); /* anything that doesn't match a recognized case is an error */ upd_cmd = I40E_NVMUPD_INVALID;
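	/*
	 * Decoding sketch (field widths are illustrative; the real masks
	 * and shifts live in i40e_type.h and are applied by the
	 * i40e_nvmupd_get_*() helpers above): cmd->config packs the
	 * module pointer in its low bits, the transaction type above it,
	 * and the preservation flags higher still. For example, with an
	 * 8-bit module field and the transaction at bit 8, a config of
	 * 0x0000020A would decode to transaction = 0x2, module = 0x0A.
	 */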
transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); /* limits on data size */ if ((cmd->data_size < 1) || (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_validate_command data_size %d\n", cmd->data_size); *perrno = -EFAULT; return I40E_NVMUPD_INVALID; } switch (cmd->command) { case I40E_NVM_READ: switch (transaction) { case I40E_NVM_CON: upd_cmd = I40E_NVMUPD_READ_CON; break; case I40E_NVM_SNT: upd_cmd = I40E_NVMUPD_READ_SNT; break; case I40E_NVM_LCB: upd_cmd = I40E_NVMUPD_READ_LCB; break; case I40E_NVM_SA: upd_cmd = I40E_NVMUPD_READ_SA; break; case I40E_NVM_EXEC: - if (module == 0xf) - upd_cmd = I40E_NVMUPD_STATUS; - else if (module == 0) + switch (module) { + case I40E_NVM_EXEC_GET_AQ_RESULT: upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; + break; + case I40E_NVM_EXEC_FEATURES: + upd_cmd = I40E_NVMUPD_FEATURES; + break; + case I40E_NVM_EXEC_STATUS: + upd_cmd = I40E_NVMUPD_STATUS; + break; + default: + *perrno = -EFAULT; + return I40E_NVMUPD_INVALID; + } break; case I40E_NVM_AQE: upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; break; } break; case I40E_NVM_WRITE: switch (transaction) { case I40E_NVM_CON: upd_cmd = I40E_NVMUPD_WRITE_CON; break; case I40E_NVM_SNT: upd_cmd = I40E_NVMUPD_WRITE_SNT; break; case I40E_NVM_LCB: upd_cmd = I40E_NVMUPD_WRITE_LCB; break; case I40E_NVM_SA: upd_cmd = I40E_NVMUPD_WRITE_SA; break; case I40E_NVM_ERA: upd_cmd = I40E_NVMUPD_WRITE_ERA; break; case I40E_NVM_CSUM: upd_cmd = I40E_NVMUPD_CSUM_CON; break; case (I40E_NVM_CSUM|I40E_NVM_SA): upd_cmd = I40E_NVMUPD_CSUM_SA; break; case (I40E_NVM_CSUM|I40E_NVM_LCB): upd_cmd = I40E_NVMUPD_CSUM_LCB; break; case I40E_NVM_EXEC: if (module == 0) upd_cmd = I40E_NVMUPD_EXEC_AQ; break; } break; } return upd_cmd; } /** * i40e_nvmupd_exec_aq - Run an AQ command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; enum i40e_status_code status; struct i40e_aq_desc *aq_desc; u32 buff_size = 0; u8 *buff = NULL; u32 aq_desc_len; u32 aq_data_len; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); if (cmd->offset == 0xffff) return I40E_SUCCESS; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; aq_desc_len = sizeof(struct i40e_aq_desc); memset(&hw->nvm_wb_desc, 0, aq_desc_len); /* get the aq descriptor */ if (cmd->data_size < aq_desc_len) { i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", cmd->data_size, aq_desc_len); *perrno = -EINVAL; return I40E_ERR_PARAM; } aq_desc = (struct i40e_aq_desc *)bytes; /* if data buffer needed, make sure it's ready */ aq_data_len = cmd->data_size - aq_desc_len; buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen)); if (buff_size) { if (!hw->nvm_buff.va) { status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, hw->aq.asq_buf_size); if (status) i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", status); } if (hw->nvm_buff.va) { buff = hw->nvm_buff.va; i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len, I40E_NONDMA_TO_NONDMA); } } if (cmd->offset) memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); /* and away we go! 
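	 * (Buffer layout recap: the first aq_desc_len bytes of @bytes hold
	 * the descriptor to send, and the remaining cmd->data_size -
	 * aq_desc_len bytes, if any, hold the indirect data buffer that
	 * was staged into hw->nvm_buff above.)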
*/ status = i40e_asq_send_command(hw, aq_desc, buff, buff_size, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_exec_aq err %s aq_err %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); return status; } /* should we wait for a followup event? */ if (cmd->offset) { hw->nvm_wait_opcode = cmd->offset; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } return status; } /** * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { u32 aq_total_len; u32 aq_desc_len; int remainder; u8 *buff; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); aq_desc_len = sizeof(struct i40e_aq_desc); aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen); /* check offset range */ if (cmd->offset > aq_total_len) { i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", __func__, cmd->offset, aq_total_len); *perrno = -EINVAL; return I40E_ERR_PARAM; } /* check copylength range */ if (cmd->data_size > (aq_total_len - cmd->offset)) { int new_len = aq_total_len - cmd->offset; i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", __func__, cmd->data_size, new_len); cmd->data_size = new_len; } remainder = cmd->data_size; if (cmd->offset < aq_desc_len) { u32 len = aq_desc_len - cmd->offset; len = min(len, cmd->data_size); i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", __func__, cmd->offset, cmd->offset + len); buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA); bytes += len; remainder -= len; buff = hw->nvm_buff.va; } else { buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len); } if (remainder > 0) { int start_byte = buff - (u8 *)hw->nvm_buff.va; i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", __func__, start_byte, start_byte + remainder); i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA); } return I40E_SUCCESS; } /** * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { u32 aq_total_len; u32 aq_desc_len; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); aq_desc_len = sizeof(struct i40e_aq_desc); aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen); /* check copylength range */ if (cmd->data_size > aq_total_len) { i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", __func__, cmd->data_size, aq_total_len); cmd->data_size = aq_total_len; } i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size, I40E_NONDMA_TO_NONDMA); return I40E_SUCCESS; } /** * i40e_nvmupd_nvm_read - Read NVM * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static 
enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; enum i40e_status_code status; u8 module, transaction; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", module, cmd->offset, cmd->data_size); i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_read status %d aq %d\n", status, hw->aq.asq_last_status); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; } /** * i40e_nvmupd_nvm_erase - Erase an NVM module * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @perrno: pointer to return error code * * module, offset, data_size and data are in cmd structure **/ static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno) { enum i40e_status_code status = I40E_SUCCESS; struct i40e_asq_cmd_details cmd_details; u8 module, transaction; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, last, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", module, cmd->offset, cmd->data_size); i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_erase status %d aq %d\n", status, hw->aq.asq_last_status); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; } /** * i40e_nvmupd_nvm_write - Write NVM * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * module, offset, data_size and data are in cmd structure **/ static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_status_code status = I40E_SUCCESS; struct i40e_asq_cmd_details cmd_details; u8 module, transaction; u8 preservation_flags; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; status = i40e_aq_update_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, preservation_flags, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", module, cmd->offset, cmd->data_size); i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write status %d aq %d\n", status, hw->aq.asq_last_status); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; } Index: releng/11.3/sys/dev/ixl/i40e_osdep.c =================================================================== --- releng/11.3/sys/dev/ixl/i40e_osdep.c (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_osdep.c (revision 349181) @@ -1,278 +1,267 @@ 
/****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include +#include #include "ixl.h" /******************************************************************** * Manage DMA'able memory. 
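 *
 * i40e_allocate_dma_mem() below follows the usual FreeBSD bus_dma
 * sequence, bus_dma_tag_create() -> bus_dmamem_alloc() ->
 * bus_dmamap_load(), with i40e_free_dma_mem() unwinding in the reverse
 * order; a failure part-way through unwinds only the steps already
 * taken (see the fail_* labels).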
*******************************************************************/ static void i40e_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs->ds_addr; return; } i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size) { mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); return(mem->va == NULL); } i40e_status i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem) { free(mem->va, M_DEVBUF); mem->va = NULL; return(0); } i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, enum i40e_memory_type type __unused, u64 size, u32 alignment) { device_t dev = ((struct i40e_osdep *)hw->back)->dev; int err; err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ alignment, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &mem->tag); if (err != 0) { device_printf(dev, "i40e_allocate_dma: bus_dma_tag_create failed, " "error %u\n", err); goto fail_0; } err = bus_dmamem_alloc(mem->tag, (void **)&mem->va, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map); if (err != 0) { device_printf(dev, "i40e_allocate_dma: bus_dmamem_alloc failed, " "error %u\n", err); goto fail_1; } err = bus_dmamap_load(mem->tag, mem->map, mem->va, size, i40e_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT); if (err != 0) { device_printf(dev, "i40e_allocate_dma: bus_dmamap_load failed, " "error %u\n", err); goto fail_2; } mem->nseg = 1; mem->size = size; bus_dmamap_sync(mem->tag, mem->map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); return (0); fail_2: bus_dmamem_free(mem->tag, mem->va, mem->map); fail_1: bus_dma_tag_destroy(mem->tag); fail_0: mem->map = NULL; mem->tag = NULL; return (err); } i40e_status i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem) { bus_dmamap_sync(mem->tag, mem->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(mem->tag, mem->map); bus_dmamem_free(mem->tag, mem->va, mem->map); bus_dma_tag_destroy(mem->tag); return (0); } void i40e_init_spinlock(struct i40e_spinlock *lock) { mtx_init(&lock->mutex, "mutex", "ixl spinlock", MTX_DEF | MTX_DUPOK); } void i40e_acquire_spinlock(struct i40e_spinlock *lock) { mtx_lock(&lock->mutex); } void i40e_release_spinlock(struct i40e_spinlock *lock) { mtx_unlock(&lock->mutex); } void i40e_destroy_spinlock(struct i40e_spinlock *lock) { if (mtx_initialized(&lock->mutex)) mtx_destroy(&lock->mutex); } +#ifndef MSEC_2_TICKS +#define MSEC_2_TICKS(m) max(1, (uint32_t)((hz == 1000) ? \ + (m) : ((uint64_t)(m) * (uint64_t)hz)/(uint64_t)1000)) +#endif + void i40e_msec_pause(int msecs) { - int ticks_to_pause = (msecs * hz) / 1000; - int start_ticks = ticks; - - if (cold || SCHEDULER_STOPPED()) { - i40e_msec_delay(msecs); - return; - } - - while (1) { - kern_yield(PRI_USER); - int yielded_ticks = ticks - start_ticks; - if (yielded_ticks > ticks_to_pause) - break; - else if (yielded_ticks < 0 - && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) { - break; - } - } + pause("i40e_msec_pause", MSEC_2_TICKS(msecs)); } /* * Helper function for debug statement printing */ void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...) 
{ va_list args; device_t dev; if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) return; dev = ((struct i40e_osdep *)hw->back)->dev; /* Re-implement device_printf() */ device_print_prettyname(dev); va_start(args, fmt); vprintf(fmt, args); va_end(args); } const char * ixl_vc_opcode_str(uint16_t op) { switch (op) { case VIRTCHNL_OP_VERSION: return ("VERSION"); case VIRTCHNL_OP_RESET_VF: return ("RESET_VF"); case VIRTCHNL_OP_GET_VF_RESOURCES: return ("GET_VF_RESOURCES"); case VIRTCHNL_OP_CONFIG_TX_QUEUE: return ("CONFIG_TX_QUEUE"); case VIRTCHNL_OP_CONFIG_RX_QUEUE: return ("CONFIG_RX_QUEUE"); case VIRTCHNL_OP_CONFIG_VSI_QUEUES: return ("CONFIG_VSI_QUEUES"); case VIRTCHNL_OP_CONFIG_IRQ_MAP: return ("CONFIG_IRQ_MAP"); case VIRTCHNL_OP_ENABLE_QUEUES: return ("ENABLE_QUEUES"); case VIRTCHNL_OP_DISABLE_QUEUES: return ("DISABLE_QUEUES"); case VIRTCHNL_OP_ADD_ETH_ADDR: return ("ADD_ETH_ADDR"); case VIRTCHNL_OP_DEL_ETH_ADDR: return ("DEL_ETH_ADDR"); case VIRTCHNL_OP_ADD_VLAN: return ("ADD_VLAN"); case VIRTCHNL_OP_DEL_VLAN: return ("DEL_VLAN"); case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: return ("CONFIG_PROMISCUOUS_MODE"); case VIRTCHNL_OP_GET_STATS: return ("GET_STATS"); case VIRTCHNL_OP_RSVD: return ("RSVD"); case VIRTCHNL_OP_EVENT: return ("EVENT"); case VIRTCHNL_OP_CONFIG_RSS_KEY: return ("CONFIG_RSS_KEY"); case VIRTCHNL_OP_CONFIG_RSS_LUT: return ("CONFIG_RSS_LUT"); case VIRTCHNL_OP_GET_RSS_HENA_CAPS: return ("GET_RSS_HENA_CAPS"); case VIRTCHNL_OP_SET_RSS_HENA: return ("SET_RSS_HENA"); default: return ("UNKNOWN"); } } u16 i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg) { u16 value; value = pci_read_config(((struct i40e_osdep *)hw->back)->dev, reg, 2); return (value); } void i40e_write_pci_cfg(struct i40e_hw *hw, u32 reg, u16 value) { pci_write_config(((struct i40e_osdep *)hw->back)->dev, reg, value, 2); return; } Index: releng/11.3/sys/dev/ixl/i40e_osdep.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_osdep.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_osdep.h (revision 349181) @@ -1,245 +1,245 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_OSDEP_H_ #define _I40E_OSDEP_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define i40e_usec_delay(x) DELAY(x) #define i40e_msec_delay(x) DELAY(1000 * (x)) #define DBG 0 #define MSGOUT(S, A, B) printf(S "\n", A, B) #define DEBUGFUNC(F) DEBUGOUT(F); #if DBG #define DEBUGOUT(S) printf(S "\n") #define DEBUGOUT1(S,A) printf(S "\n",A) #define DEBUGOUT2(S,A,B) printf(S "\n",A,B) #define DEBUGOUT3(S,A,B,C) printf(S "\n",A,B,C) #define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G) #else #define DEBUGOUT(S) #define DEBUGOUT1(S,A) #define DEBUGOUT2(S,A,B) #define DEBUGOUT3(S,A,B,C) #define DEBUGOUT6(S,A,B,C,D,E,F) #define DEBUGOUT7(S,A,B,C,D,E,F,G) #endif /* Remove unused shared code macros */ #define UNREFERENCED_PARAMETER(_p) #define UNREFERENCED_1PARAMETER(_p) #define UNREFERENCED_2PARAMETER(_p, _q) #define UNREFERENCED_3PARAMETER(_p, _q, _r) #define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) #define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) #define STATIC static #define INLINE inline #define FALSE 0 #define false 0 /* shared code requires this */ #define TRUE 1 #define true 1 #define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ #define PCI_COMMAND_REGISTER PCIR_COMMAND #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) #define i40e_memset(a, b, c, d) memset((a), (b), (c)) #define i40e_memcpy(a, b, c, d) memcpy((a), (b), (c)) #define CPU_TO_LE16(o) htole16(o) #define CPU_TO_LE32(s) htole32(s) #define CPU_TO_LE64(h) htole64(h) #define LE16_TO_CPU(a) le16toh(a) #define LE32_TO_CPU(c) le32toh(c) #define LE64_TO_CPU(k) le64toh(k) #define I40E_NTOHS(a) ntohs(a) #define I40E_NTOHL(a) ntohl(a) #define I40E_HTONS(a) htons(a) #define I40E_HTONL(a) htonl(a) #define FIELD_SIZEOF(x, y) (sizeof(((x*)0)->y)) typedef uint8_t u8; typedef int8_t s8; typedef uint16_t u16; typedef int16_t s16; typedef uint32_t u32; typedef int32_t s32; typedef uint64_t u64; /* long string relief */ typedef enum i40e_status_code i40e_status; #define __le16 u16 #define __le32 u32 #define __le64 u64 #define __be16 u16 #define __be32 u32 #define __be64 u64 /* SW spinlock */ struct i40e_spinlock { struct mtx mutex; }; #define le16_to_cpu #if defined(__amd64__) || defined(i386) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define prefetch(x) #endif struct i40e_osdep { bus_space_tag_t mem_bus_space_tag; bus_space_handle_t mem_bus_space_handle; bus_size_t mem_bus_space_size; uint32_t flush_reg; int i2c_intfc_num; device_t dev; }; struct i40e_dma_mem { void *va; u64 pa; bus_dma_tag_t tag; bus_dmamap_t map; bus_dma_segment_t seg; bus_size_t size; int nseg; int flags; }; struct i40e_virt_mem { void *va; u32 size; }; struct i40e_hw; /* forward decl */ u16 i40e_read_pci_cfg(struct i40e_hw *, u32); void i40e_write_pci_cfg(struct i40e_hw *, u32, 
u16); /* ** i40e_debug - OS dependent version of shared code debug printing */ enum i40e_debug_mask; #define i40e_debug(h, m, s, ...) i40e_debug_shared(h, m, s, ##__VA_ARGS__) extern void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt_str, ...); /* Non-busy-wait that uses kern_yield() */ void i40e_msec_pause(int); const char * ixl_vc_opcode_str(uint16_t op); /* ** This hardware supports either 16 or 32 byte rx descriptors; ** the driver only uses the 32 byte kind. */ #define i40e_rx_desc i40e_32byte_rx_desc static __inline uint32_t rd32_osdep(struct i40e_osdep *osdep, uint32_t reg) { KASSERT(reg < osdep->mem_bus_space_size, ("ixl: register offset %#jx too large (max is %#jx)", (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size)); return (bus_space_read_4(osdep->mem_bus_space_tag, osdep->mem_bus_space_handle, reg)); } static __inline void wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value) { KASSERT(reg < osdep->mem_bus_space_size, ("ixl: register offset %#jx too large (max is %#jx)", (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size)); bus_space_write_4(osdep->mem_bus_space_tag, osdep->mem_bus_space_handle, reg, value); } static __inline void ixl_flush_osdep(struct i40e_osdep *osdep) { rd32_osdep(osdep, osdep->flush_reg); } #define rd32(a, reg) rd32_osdep((a)->back, (reg)) #define wr32(a, reg, value) wr32_osdep((a)->back, (reg), (value)) #define rd64(a, reg) (\ bus_space_read_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \ reg)) #define wr64(a, reg, value) (\ bus_space_write_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \ reg, value)) #define ixl_flush(a) ixl_flush_osdep((a)->back) enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, u16 *data); #endif /* _I40E_OSDEP_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_prototype.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_prototype.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_prototype.h (revision 349181) @@ -1,590 +1,602 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ #include "i40e_type.h" #include "i40e_alloc.h" #include "virtchnl.h" /* Prototypes for shared code functions that are not in * the standard function pointer structures. These are * mostly because they are needed even before the init * has happened and will assist in the early SW and FW * setup. */ /* adminq functions */ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw); enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw); enum i40e_status_code i40e_init_asq(struct i40e_hw *hw); enum i40e_status_code i40e_init_arq(struct i40e_hw *hw); enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw); enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw); enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw); enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw); u16 i40e_clean_asq(struct i40e_hw *hw); void i40e_free_adminq_asq(struct i40e_hw *hw); void i40e_free_adminq_arq(struct i40e_hw *hw); enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr); void i40e_adminq_init_ring_data(struct i40e_hw *hw); enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *events_pending); enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details); bool i40e_asq_done(struct i40e_hw *hw); /* debug function for adminq */ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len); void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, bool pf_lut, u8 *lut, u16 lut_size); enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw, u16 seid, struct i40e_aqc_get_set_rss_key_data *key); enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw, u16 seid, struct i40e_aqc_get_set_rss_key_data *key); const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err); u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on, u16 led_addr, u32 mode); enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, u16 *val); enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval); +enum i40e_status_code i40e_get_phy_lpi_status(struct i40e_hw *hw, + struct i40e_hw_port_stats *stats); +enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw, u32 *tx_counter, + u32 
*rx_counter); +enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw, + bool offset_loaded, u64 *tx_offset, + u64 *tx_stat, u64 *rx_offset, + u64 *rx_stat); /* admin send queue commands */ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, u32 reg_addr, u64 reg_val, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_aq_set_phy_config *config, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset); enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size, bool crc_en, u16 pacing, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, bool enable_link, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 vsi_id, bool set_filter, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details, bool rx_only_promisc); enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details 
*cmd_details); enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, u16 seid, bool enable, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, bool default_port, u16 *pveb_seid, bool enable_stats, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, u16 *statistic_index, u16 *vebs_used, u16 *vebs_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rule_id, u16 *rules_used, u16 *rules_free); enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rules_used, u16 *rules_free); enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_add_remove_vlan_element_data *v_list, u8 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, u16 flags, u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, enum i40e_aq_resource_access_type access, u8 sdp_number, u64 *timeout, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, u8 sdp_number, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, struct i40e_asq_cmd_details *cmd_details); enum 
i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, bool last_command, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, u8 cmd_flags, u32 field_id, void *data, u16 buf_size, u16 *element_count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, u8 cmd_flags, void *data, u16 buf_size, u16 element_count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, void *buff, u16 buff_size, u16 *data_size, enum i40e_admin_queue_opc list_type_opc, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw, + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, u8 mib_type, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type, - void *buff, u16 buff_size, u16 tlv_len, - u16 *mib_len, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw, - u8 bridge_type, void *buff, u16 buff_size, - u16 old_len, u16 new_len, u16 offset, - u16 *mib_len, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw, - u8 bridge_type, void *buff, u16 buff_size, - u16 tlv_len, u16 *mib_len, - struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code +i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + bool persist, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, + bool persist, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, bool start_agent, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, u8 *num_entries, struct 
i40e_aqc_switch_resource_alloc_element_resp *buf, u16 count, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, u16 mac_seid, u16 vsi_seid, u16 *ret_seid); enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, u16 vsi_seid, u16 tag, u16 queue_num, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, u16 tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid, u16 etag, u8 num_tags_in_buf, void *buf, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid, u16 etag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, u16 old_tag, u16 new_tag, u16 *tags_used, u16 *tags_free, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 *stat_index, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, u16 vlan_id, u16 stat_index, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, u16 flags, u8 *mac_addr, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_credit, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_bw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code 
i40e_aq_query_port_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_port_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg); enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi, struct i40e_aqc_add_remove_cloud_filters_element_data *filters, u8 filter_count); enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw, u16 vsi, struct i40e_aqc_add_remove_cloud_filters_element_data *filters, u8 filter_count); enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1); enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer); enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1); enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, u32 addr, u32 dw_count, void *buffer); enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw); enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, u8 bios_mode, bool *reset_needed); enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, u8 oem_mode); /* i40e_common */ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw); enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw); void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up); enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw); enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid); enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw); /* prototype for functions used for NVM access */ enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw); enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access); void i40e_release_nvm(struct i40e_hw *hw); enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data); enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module, u32 offset, u16 words, void *data, bool last_command); enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data); enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset, void *data); enum i40e_status_code __i40e_write_nvm_buffer(struct 
i40e_hw *hw, u8 module, u32 offset, u16 words, void *data); enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum); enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw); enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *); void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, struct i40e_aq_desc *desc); void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); +enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable); enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw); extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) { return i40e_ptype_lookup[ptype]; } /** * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition * @link_speed: the speed to convert * * Returns the link_speed in terms of the virtchnl interface, for use in * converting link_speed as reported by the AdminQ into the format used for * talking to virtchnl devices. If we can't represent the link speed properly, * report LINK_SPEED_UNKNOWN. **/ static INLINE enum virtchnl_link_speed i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) { switch (link_speed) { case I40E_LINK_SPEED_100MB: return VIRTCHNL_LINK_SPEED_100MB; case I40E_LINK_SPEED_1GB: return VIRTCHNL_LINK_SPEED_1GB; + case I40E_LINK_SPEED_2_5GB: + return VIRTCHNL_LINK_SPEED_2_5GB; + case I40E_LINK_SPEED_5GB: + return VIRTCHNL_LINK_SPEED_5GB; case I40E_LINK_SPEED_10GB: return VIRTCHNL_LINK_SPEED_10GB; case I40E_LINK_SPEED_40GB: return VIRTCHNL_LINK_SPEED_40GB; case I40E_LINK_SPEED_20GB: return VIRTCHNL_LINK_SPEED_20GB; case I40E_LINK_SPEED_25GB: return VIRTCHNL_LINK_SPEED_25GB; case I40E_LINK_SPEED_UNKNOWN: default: return VIRTCHNL_LINK_SPEED_UNKNOWN; } } /* prototype for functions used for SW spinlocks */ void i40e_init_spinlock(struct i40e_spinlock *sp); void i40e_acquire_spinlock(struct i40e_spinlock *sp); void i40e_release_spinlock(struct i40e_spinlock *sp); void i40e_destroy_spinlock(struct i40e_spinlock *sp); /* i40e_common for VF drivers*/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, struct virtchnl_vf_resource *msg); enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw); enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, enum virtchnl_ops v_opcode, enum i40e_status_code v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings); enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u8 *mac_addr, u16 ethtype, u16 flags, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, u8 table_id, u32 start_index, u16 buff_size, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details); u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); enum i40e_status_code i40e_aq_rx_ctl_write_register(struct 
i40e_hw *hw, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details); void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, + u8 phy_select, u8 dev_addr, bool page_change, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, + u8 phy_select, u8 dev_addr, bool page_change, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code +i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 opcode, + u32 *cmd_status, u32 *data0, u32 *data1, + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, struct i40e_aqc_arp_proxy_data *proxy_config, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, u8 filter_index, struct i40e_aqc_set_wol_filter_data *filter, bool set_filter, bool no_wol_tco, bool filter_valid, bool no_wol_tco_valid, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw, u16 *wake_reason, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 *value); enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 value); enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value); enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value); u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); #endif /* _I40E_PROTOTYPE_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_register.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_register.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_register.h (revision 349181) @@ -1,5363 +1,5363 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
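Two illustrative notes on the i40e_prototype.h changes above. First, with the I40E_LINK_SPEED_2_5GB and I40E_LINK_SPEED_5GB cases added, i40e_virtchnl_link_speed() now covers every speed the AdminQ reports on this family, so a hypothetical PF-to-VF link notification can convert directly:

	enum virtchnl_link_speed vf_speed;

	/* hw->phy.link_info is refreshed by i40e_aq_get_link_info() */
	vf_speed = i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);

Second, i40e_aq_get_phy_register() and i40e_aq_set_phy_register() gain a `bool page_change` argument, which appears to control whether firmware may switch the external PHY's register page while servicing the access; existing callers must be updated to pass it explicitly.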
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ #define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ #define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 #define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) #define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ #define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 #define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) #define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ #define I40E_GL_ARQH_ARQH_SHIFT 0 #define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) #define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ #define I40E_GL_ARQT_ARQT_SHIFT 0 #define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) #define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ #define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 #define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) #define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ #define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 #define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) #define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ #define I40E_GL_ATQH_ATQH_SHIFT 0 #define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) #define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ #define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 #define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) #define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 #define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) #define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 #define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) #define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) #define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 #define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) #define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ #define I40E_GL_ATQT_ATQT_SHIFT 0 #define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) #define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ #define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 #define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) #define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ #define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 #define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) #define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ #define I40E_PF_ARQH_ARQH_SHIFT 0 #define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) #define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ #define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 #define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, 
I40E_PF_ARQLEN_ARQLEN_SHIFT) #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 #define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 #define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT) #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 #define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ #define I40E_PF_ARQT_ARQT_SHIFT 0 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) #define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ #define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 #define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) #define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ #define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 #define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) #define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ #define I40E_PF_ATQH_ATQH_SHIFT 0 #define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) #define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ #define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 #define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 #define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 #define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT) #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 #define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ #define I40E_PF_ATQT_ATQT_SHIFT 0 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) #define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQBAH_MAX_INDEX 127 #define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 #define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) #define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQBAL_MAX_INDEX 127 #define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 #define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) #define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQH_MAX_INDEX 127 #define I40E_VF_ARQH_ARQH_SHIFT 0 #define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) #define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQLEN_MAX_INDEX 127 #define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 #define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) #define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 #define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) #define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 #define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 #define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ 
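An illustrative note on the convention used throughout this register map: I40E_MASK() builds each _MASK value already shifted into its field position, so extracting a field is a mask-then-shift with no extra alignment step. A minimal sketch using the PF admin receive queue length register defined above:

	u32 val = rd32(hw, I40E_PF_ARQLEN);
	u16 arqlen = (val & I40E_PF_ARQLEN_ARQLEN_MASK) >>
	    I40E_PF_ARQLEN_ARQLEN_SHIFT;	/* ring size in descriptors */
	bool overflow = (val & I40E_PF_ARQLEN_ARQOVFL_MASK) != 0;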
#define I40E_VF_ARQT_MAX_INDEX 127 #define I40E_VF_ARQT_ARQT_SHIFT 0 #define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) #define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQBAH_MAX_INDEX 127 #define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 #define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) #define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQBAL_MAX_INDEX 127 #define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 #define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) #define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQH_MAX_INDEX 127 #define I40E_VF_ATQH_ATQH_SHIFT 0 #define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) #define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQLEN_MAX_INDEX 127 #define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 #define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) #define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 #define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) #define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 #define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 #define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQT_MAX_INDEX 127 #define I40E_VF_ATQT_ATQT_SHIFT 0 #define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) #define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ #define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 #define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) #define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ #define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 #define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) #define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 #define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) #define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 #define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) #define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ #define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 #define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) #define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 #define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) #define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 #define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) #define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 #define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) #define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 #define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) #define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ #define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 #define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) #define 
I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 #define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) #define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 #define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) #define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 #define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) #define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 #define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 #define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) #define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ #define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 #define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) #define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 #define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) #define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ #define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 #define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 #define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) #define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 #define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) #define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 #define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) #define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ #define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 #define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 #define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) #define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 #define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) #define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 #define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) #define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 #define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) #define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 #define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) #define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ #define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 #define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) #define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ #define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 #define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) #define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ #define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 #define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) #define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ #define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 #define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) #define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ #define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 #define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 #define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) #define 
I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 #define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) #define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ #define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 #define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) #define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 #define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) #define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 #define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) #define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 #define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 #define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) #define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 #define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) #define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ #define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 #define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) #define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 #define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) #define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 #define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) #define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 #define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) #define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 #define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) #define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ #define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 #define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) #define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 #define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) #define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 #define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) #define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 #define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) #define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 #define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 #define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 #define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) #define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 #define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) #define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 #define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) #define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ #define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 #define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) #define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ #define 
I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 #define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 #define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 #define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 #define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 #define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 #define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) #define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 #define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 #define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) #define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 #define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 #define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) #define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ #define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 #define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) #define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 #define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) #define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 #define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) #define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 #define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 #define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) #define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ #define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 #define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) #define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 #define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) #define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ #define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 #define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) #define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 #define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) #define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ #define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 #define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) #define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 #define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) #define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ #define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 
#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) #define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 #define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) #define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 #define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) #define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 #define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) #define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 #define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) #define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 #define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) #define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 #define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) #define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 #define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) #define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 #define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) #define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ #define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 #define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 #define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) #define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ #define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 #define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) #define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 #define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) #define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 #define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) #define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 #define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) #define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ #define I40E_GL_FWSTS_FWS0B_SHIFT 0 #define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) #define I40E_GL_FWSTS_FWRI_SHIFT 9 #define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 #define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) #define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 #define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) #define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 #define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) #define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 #define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) #define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 #define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) #define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 #define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) #define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ #define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 #define 
I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) #define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 #define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) #define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 #define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) #define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 #define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 #define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 #define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 #define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) #define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 #define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) #define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 #define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) #define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 #define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) #define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 #define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) #define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ #define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 #define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) #define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ #define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 #define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) #define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_I2CCMD_MAX_INDEX 3 #define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 #define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) #define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 #define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) #define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 #define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) #define I40E_GLGEN_I2CCMD_OP_SHIFT 27 #define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) #define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 #define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) #define I40E_GLGEN_I2CCMD_R_SHIFT 29 #define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) #define I40E_GLGEN_I2CCMD_E_SHIFT 31 #define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) #define 
I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 #define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 #define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) #define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 #define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) #define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 #define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) #define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 #define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) #define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 #define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) #define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 #define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) #define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 #define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) #define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 #define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) #define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 #define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) #define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 #define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) #define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 #define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) #define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ #define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 #define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) #define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 #define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 #define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 #define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 #define 
I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 #define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 #define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) #define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSCA_MAX_INDEX 3 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 #define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 #define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 #define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 #define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_GLGEN_MSCA_STCODE_SHIFT 28 #define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 #define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 #define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 #define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) #define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ #define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 #define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) #define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 #define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) #define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 #define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 #define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) #define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 #define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) #define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 #define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) #define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 #define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) #define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 #define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) #define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define 
I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 #define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) #define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 #define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) #define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ #define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 #define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) #define I40E_GLGEN_STAT_DCBEN_SHIFT 2 #define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) #define I40E_GLGEN_STAT_VTEN_SHIFT 3 #define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) #define I40E_GLGEN_STAT_FCOEN_SHIFT 4 #define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) #define I40E_GLGEN_STAT_EVBEN_SHIFT 5 #define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) #define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 #define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) #define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 #define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 #define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) #define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ #define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 #define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) #define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 #define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) #define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ #define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 #define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) #define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 #define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) #define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ #define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 #define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) #define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 #define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) #define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 #define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) #define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 #define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) #define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 #define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) #define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 #define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) #define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 #define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) #define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ #define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 #define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) #define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ #define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 #define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) #define 
I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 #define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) #define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ #define I40E_VFGEN_RSTAT1_MAX_INDEX 127 #define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 #define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) #define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 #define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) #define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 #define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) #define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ #define I40E_VSIGEN_RSTAT_MAX_INDEX 383 #define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 #define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) #define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ #define I40E_VSIGEN_RTRIG_MAX_INDEX 383 #define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 #define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) #define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) #define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 #define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 #define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) #define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ #define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 #define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) #define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) #define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 #define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 #define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) #define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) #define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ #define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 #define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) #define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ #define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 #define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) #define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ 
/* Reset: CORER */ #define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 #define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 #define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) #define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 #define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 #define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) #define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 #define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) #define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ #define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 #define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) #define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ #define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 #define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) #define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 #define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 #define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) #define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 #define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 #define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) #define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ #define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 #define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) #define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ #define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 #define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) #define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ #define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 #define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) #define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) #define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 #define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 #define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) #define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ #define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 #define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) #define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) #define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 #define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) #define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: 
CORER */ #define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 #define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 #define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) #define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ #define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 #define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) #define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 #define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 #define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) #define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_SDPART_MAX_INDEX 15 #define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 #define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) #define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 #define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) #define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ #define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 #define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) #define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ #define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 #define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) #define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 #define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) #define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 #define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) #define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 #define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) #define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 #define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) #define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 #define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 #define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) #define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ #define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 #define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) #define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 #define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) #define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ #define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 #define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) #define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ #define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 #define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) #define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 #define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) #define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 #define 
I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) #define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */ #define I40E_GL_GP_FUSE_MAX_INDEX 28 #define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 #define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) #define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ #define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 #define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) #define I40E_GL_UFUSE_NIC_ID_SHIFT 2 #define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) #define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 #define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) #define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 #define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) #define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ #define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 #define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 #define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 #define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 #define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 #define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 #define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 #define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 #define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 #define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 #define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 #define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 #define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 #define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 #define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 #define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 #define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 #define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 #define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK 
I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 #define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 #define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 #define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 #define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 #define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 #define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 #define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 #define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 #define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 #define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 #define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) #define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 #define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) #define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ #define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 #define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) #define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ #define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 #define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) #define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 #define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) #define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 #define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) #define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 #define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) #define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ #define I40E_PFINT_CEQCTL_MAX_INDEX 511 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 #define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 #define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) #define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 #define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 #define 
I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) #define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 #define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 #define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) #define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */ #define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0 #define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT) #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1 #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT) #define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2 #define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT) #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) #define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1 #define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT) #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 #define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) #define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ #define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 #define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) #define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 #define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT) #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 #define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 #define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) #define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ #define 
I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 #define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 #define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 #define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 #define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 #define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 #define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 #define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 #define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 #define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 #define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 #define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 #define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 #define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 #define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 #define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 #define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 #define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 #define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 #define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 #define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 #define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 #define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 #define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 #define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 #define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, 
I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 #define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 #define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 #define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 #define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) #define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 #define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 #define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) #define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 #define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) #define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 #define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) #define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 #define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) #define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 #define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) #define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 #define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) #define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 #define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) #define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 #define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) #define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 #define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) #define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 #define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT) #define I40E_PFINT_ICR0_GRST_SHIFT 20 #define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) #define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 #define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) #define I40E_PFINT_ICR0_GPIO_SHIFT 22 #define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 #define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) #define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 #define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 #define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) #define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 #define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT) #define I40E_PFINT_ICR0_VFLR_SHIFT 29 #define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT) #define I40E_PFINT_ICR0_ADMINQ_SHIFT 30 #define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT) #define I40E_PFINT_ICR0_SWINT_SHIFT 31 #define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, 
I40E_PFINT_ICR0_SWINT_SHIFT) #define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */ #define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16 #define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT) #define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19 #define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT) #define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20 #define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT) #define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21 #define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT) #define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22 #define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 #define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) #define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 #define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 #define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) #define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 #define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT) #define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29 #define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) #define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 #define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) #define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 #define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT) #define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ #define I40E_PFINT_ITR0_MAX_INDEX 2 #define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 #define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) #define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ #define I40E_PFINT_ITRN_MAX_INDEX 2 #define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) #define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) #define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 #define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) #define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ #define I40E_PFINT_LNKLSTN_MAX_INDEX 511 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) #define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ #define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 #define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) #define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) #define 
I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ #define I40E_PFINT_RATEN_MAX_INDEX 511 #define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ #define I40E_QINT_RQCTL_MAX_INDEX 1535 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 #define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 #define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 #define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 #define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 #define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 #define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) #define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ #define I40E_QINT_TQCTL_MAX_INDEX 1535 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 #define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 #define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT) #define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 #define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 #define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 #define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 #define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) #define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ #define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 #define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 #define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) #define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) #define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 #define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) #define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 #define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) #define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 #define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) #define 
I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) #define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 #define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ #define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 #define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 #define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) #define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 #define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) #define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 #define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) #define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 #define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) #define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 #define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) #define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_ICR0_MAX_INDEX 127 #define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 #define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) #define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 #define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) #define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 #define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) #define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 #define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) #define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 #define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) #define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 #define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) #define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 #define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) #define I40E_VFINT_ICR0_SWINT_SHIFT 31 #define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) #define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 #define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 #define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) #define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 #define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) #define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 #define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) #define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ 
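/*
 * Editor's note: the helpers below are an illustrative sketch, not part
 * of the original header.  They show how the *_SHIFT/*_MASK pairs in
 * this file are typically combined to decode and encode register
 * fields, assuming the driver's u32 type (from i40e_osdep.h).  The
 * helper names are hypothetical.
 */
static inline u32
i40e_reg_get_field(u32 reg_val, u32 mask, u32 shift)
{
	/* Isolate the field bits, then right-justify them. */
	return ((reg_val & mask) >> shift);
}

static inline u32
i40e_reg_set_field(u32 reg_val, u32 mask, u32 shift, u32 field_val)
{
	/* Clear the field, then merge in the new left-shifted value. */
	return ((reg_val & ~mask) | ((field_val << shift) & mask));
}

/*
 * For example (sketch only), reprogramming the interrupt throttling
 * interval field of I40E_VFINT_DYN_CTL0, assuming the driver's
 * rd32()/wr32() MMIO accessors:
 *
 *	u32 val = rd32(hw, I40E_VFINT_DYN_CTL0(vf_id));
 *	val = i40e_reg_set_field(val, I40E_VFINT_DYN_CTL0_INTERVAL_MASK,
 *	    I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT, new_interval);
 *	wr32(hw, I40E_VFINT_DYN_CTL0(vf_id), val);
 */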
#define I40E_VFINT_ITR0_MAX_INDEX 2 #define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 #define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) #define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) #define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VPINT_AEQCTL_MAX_INDEX 127 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 #define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 #define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) #define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 #define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) #define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 #define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) #define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ #define I40E_VPINT_CEQCTL_MAX_INDEX 511 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 #define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 #define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) #define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 #define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) #define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 #define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) #define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ #define I40E_VPINT_LNKLST0_MAX_INDEX 127 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) #define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 #define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) #define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ #define I40E_VPINT_LNKLSTN_MAX_INDEX 511 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) #define I40E_VPINT_RATE0(_VF) 
(0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ #define I40E_VPINT_RATE0_MAX_INDEX 127 #define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 #define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) #define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 #define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) #define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ #define I40E_VPINT_RATEN_MAX_INDEX 511 #define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 #define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) #define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) #define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ #define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 #define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) #define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 #define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) #define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 #define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) #define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ #define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 #define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) #define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ #define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 #define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) #define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ #define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 #define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) #define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ #define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 #define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 #define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 #define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 #define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) #define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 #define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) #define I40E_QRX_ENA_QENA_STAT_SHIFT 2 #define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) #define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 
*/ /* Reset: CORER */ #define I40E_QRX_TAIL_MAX_INDEX 1535 #define I40E_QRX_TAIL_TAIL_SHIFT 0 #define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) #define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ #define I40E_QTX_CTL_MAX_INDEX 1535 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0 #define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) #define I40E_QTX_CTL_PF_INDX_SHIFT 2 #define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT) #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 #define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) #define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QTX_ENA_MAX_INDEX 1535 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0 #define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) #define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 #define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) #define I40E_QTX_ENA_QENA_STAT_SHIFT 2 #define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) #define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ #define I40E_QTX_HEAD_MAX_INDEX 1535 #define I40E_QTX_HEAD_HEAD_SHIFT 0 #define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) #define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 #define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) #define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QTX_TAIL_MAX_INDEX 1535 #define I40E_QTX_TAIL_TAIL_SHIFT 0 #define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) #define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ #define I40E_VPLAN_MAPENA_MAX_INDEX 127 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 #define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) #define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ #define I40E_VPLAN_QTABLE_MAX_INDEX 15 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 #define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) #define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ #define I40E_VSILAN_QBASE_MAX_INDEX 383 #define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 #define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) #define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ #define I40E_VSILAN_QTABLE_MAX_INDEX 7 #define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 #define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) #define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 #define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) #define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 #define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) #define I40E_PRTGL_SAH_MFS_SHIFT 16 #define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT) #define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 #define I40E_PRTGL_SAL_FC_SAL_MASK 
I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 #define 
I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) #define 
I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) #define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ #define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 #define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) #define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ #define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 #define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) #define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 #define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) #define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 #define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) #define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 #define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) #define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 #define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) #define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 #define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) #define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ #define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 #define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) #define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ #define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 #define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 #define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) #define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ #define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 #define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) #define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ #define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 #define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 #define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) #define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ #define 
I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 #define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) #define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 #define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) #define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 #define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) #define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 #define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) #define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 #define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) #define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 #define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) #define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 #define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) #define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 #define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) #define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ #define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 #define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 #define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) #define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ #define I40E_PRT_MNG_MDEF_MAX_INDEX 7 #define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 #define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) #define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 #define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) #define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 #define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) #define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 #define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) #define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 #define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) #define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 #define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) #define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 #define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) #define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 #define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) #define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 #define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) #define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 #define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) #define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 #define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) #define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 #define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) #define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 #define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) #define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* 
Reset: POR */ #define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 #define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 #define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 #define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 #define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) #define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ #define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) #define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ #define I40E_PRT_MNG_METF_MAX_INDEX 3 #define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 #define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) #define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 #define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) #define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ #define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 #define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 #define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) #define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 #define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) #define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 #define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) #define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 #define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) #define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ #define 
I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 #define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 #define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) #define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ #define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 #define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 #define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) #define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ #define I40E_PRT_MNG_MMAH_MAX_INDEX 3 #define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 #define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) #define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ #define I40E_PRT_MNG_MMAL_MAX_INDEX 3 #define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 #define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) #define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ #define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 #define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) #define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ #define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 #define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) #define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 #define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) #define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 #define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) #define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 #define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) #define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 #define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) #define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 #define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) #define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 #define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) #define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 #define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) #define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ #define I40E_MSIX_PBA_MAX_INDEX 5 #define I40E_MSIX_PBA_PENBIT_SHIFT 0 #define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) #define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ #define I40E_MSIX_TADD_MAX_INDEX 128 #define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 #define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) #define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 #define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) #define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ #define I40E_MSIX_TMSG_MAX_INDEX 128 #define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 #define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) #define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ #define I40E_MSIX_TUADD_MAX_INDEX 128 #define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 #define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) #define 
I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ #define I40E_MSIX_TVCTRL_MAX_INDEX 128 #define I40E_MSIX_TVCTRL_MASK_SHIFT 0 #define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) #define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ #define I40E_VFMSIX_PBA1_MAX_INDEX 19 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 #define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) #define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ #define I40E_VFMSIX_TADD1_MAX_INDEX 639 #define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 #define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) #define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 #define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) #define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ #define I40E_VFMSIX_TMSG1_MAX_INDEX 639 #define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 #define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) #define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ #define I40E_VFMSIX_TUADD1_MAX_INDEX 639 #define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 #define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) #define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 #define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ #define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 #define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) #define I40E_GLNVM_FLA_FL_CE_SHIFT 1 #define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) #define I40E_GLNVM_FLA_FL_SI_SHIFT 2 #define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) #define I40E_GLNVM_FLA_FL_SO_SHIFT 3 #define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) #define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 #define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) #define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 #define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) #define I40E_GLNVM_FLA_LOCKED_SHIFT 6 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) #define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 #define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) #define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 #define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) #define I40E_GLNVM_FLA_FL_DER_SHIFT 31 #define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) #define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ #define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 #define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT) #define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 #define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) #define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ #define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 #define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) #define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 #define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, 
I40E_GLNVM_GENS_SR_SIZE_SHIFT) #define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 #define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) #define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 #define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) #define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 #define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) #define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ #define I40E_GLNVM_PROTCSR_MAX_INDEX 59 #define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 #define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) #define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ #define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 #define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) #define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 #define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) #define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 #define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT) #define I40E_GLNVM_SRCTL_START_SHIFT 30 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 #define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 #define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ #define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 #define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 #define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 #define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 #define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 #define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 #define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 #define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 #define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 #define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) #define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) #define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ #define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 #define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) #define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ #define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 #define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) #define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ #define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 #define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) #define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 #define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) #define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 #define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) #define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 #define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) #define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 #define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) #define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 #define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) #define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 #define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) #define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 #define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) #define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 #define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) #define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 #define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) #define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 #define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) #define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 #define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) #define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 #define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) #define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 #define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) #define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ #define I40E_GLPCI_CNF_FLEX10_SHIFT 1 #define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) #define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 #define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) #define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ #define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 #define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) #define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 #define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) #define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 #define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) #define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 #define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) #define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ #define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 #define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) #define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, 
I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) #define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) #define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) #define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) #define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) #define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 #define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) #define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 #define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) #define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 #define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) #define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 #define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) #define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) #define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ #define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 #define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 #define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) #define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 #define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) #define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) 
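/*
 * Editor's sketch: how the I40E_GLNVM_SRCTL/I40E_GLNVM_SRDATA pair
 * defined above is typically used to read one 16-bit shadow-RAM word:
 * wait for SRBUSY to clear, latch the word address plus START, poll
 * DONE, then pull the result out of RDDATA.  Assumes the rd32()/wr32()
 * accessors and i40e_usec_delay() from i40e_osdep.h; illustrative
 * only, not part of this change.
 */
static enum i40e_status_code
sketch_read_sr_word(struct i40e_hw *hw, u16 offset, u16 *data)
{
	u32 sr_reg;
	int retry;

	/* Wait until no shadow-RAM access is already in progress. */
	for (retry = 0; retry < 100000; retry++) {
		sr_reg = rd32(hw, I40E_GLNVM_SRCTL);
		if (!(sr_reg & I40E_GLNVM_SRCTL_SRBUSY_MASK))
			break;
		i40e_usec_delay(5);
	}
	if (sr_reg & I40E_GLNVM_SRCTL_SRBUSY_MASK)
		return I40E_ERR_TIMEOUT;

	/* Latch the word address and kick off the read. */
	sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
	    I40E_GLNVM_SRCTL_START_MASK;
	wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

	/* Poll DONE, then extract the 16-bit result from SRDATA. */
	for (retry = 0; retry < 100000; retry++) {
		sr_reg = rd32(hw, I40E_GLNVM_SRCTL);
		if (sr_reg & I40E_GLNVM_SRCTL_DONE_MASK) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg & I40E_GLNVM_SRDATA_RDDATA_MASK) >>
			    I40E_GLNVM_SRDATA_RDDATA_SHIFT);
			return I40E_SUCCESS;
		}
		i40e_usec_delay(5);
	}
	return I40E_ERR_TIMEOUT;
}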
#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) #define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 #define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) #define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 #define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) #define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4 #define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT) #define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 #define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) #define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10 #define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT) #define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 #define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) #define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */ #define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 #define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) #define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 #define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) #define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 #define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) #define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */ #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) #define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */ #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) #define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */ #define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 #define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) #define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 #define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) #define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */ #define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 #define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) #define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 #define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) #define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */ #define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 #define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) #define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 #define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) #define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 #define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) #define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 #define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) #define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 #define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) #define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 #define I40E_GLPCI_PMSUP_SLOT_CLK_MASK 
I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) #define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 #define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) #define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */ #define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 #define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) #define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 #define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) #define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */ #define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 #define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) #define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8 #define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) #define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 #define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) #define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 #define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) #define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */ #define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 #define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT) #define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */ #define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 #define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT) #define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */ #define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 #define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT) #define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */ #define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 #define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) #define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */ #define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 #define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) #define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */ #define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0 #define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT) #define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */ #define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 #define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT) #define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */ #define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0 #define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT) #define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */ #define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) #define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ #define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 #define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) #define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 #define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ 
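/*
 * Editor's sketch: every register above follows the same _SHIFT/_MASK
 * field convention, so reading a field is always "read, mask, shift
 * down" -- here, the maximum link width advertised in
 * I40E_GLPCI_LINKCAP.  Assumes the rd32() accessor from i40e_osdep.h;
 * illustrative only, not part of this change.
 */
static u32
sketch_glpci_max_link_width(struct i40e_hw *hw)
{
	u32 linkcap = rd32(hw, I40E_GLPCI_LINKCAP);

	/* Isolate MAX_LINK_WIDTH (a 4-bit field starting at bit 9). */
	return ((linkcap & I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK) >>
	    I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT);
}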
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) #define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 #define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) #define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 #define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) #define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */ #define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 #define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT) #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 #define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT) #define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ #define I40E_PF_PCI_CIAD_DATA_SHIFT 0 #define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT) #define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */ #define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 #define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) #define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1 #define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT) #define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2 #define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT) #define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */ #define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 #define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT) #define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 #define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT) #define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 #define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT) #define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 #define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT) #define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */ #define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0 #define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT) #define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16 #define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT) #define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */ #define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 #define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) #define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 #define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) #define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */ #define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 #define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) #define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 #define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) #define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 #define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) #define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */ #define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 #define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) #define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */ #define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 #define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) #define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */ #define 
I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 #define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) #define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */ #define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 #define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) #define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */ #define I40E_PFPCI_PM_PME_EN_SHIFT 0 #define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT) #define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */ #define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 #define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) #define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ #define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0 #define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT) #define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16 #define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT) #define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */ #define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 #define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) #define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */ #define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 #define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 #define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) #define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */ #define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 #define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) #define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */ #define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0 #define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) #define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */ #define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 #define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT) #define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ #define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 #define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) #define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */ #define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 #define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) #define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 #define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) #define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 #define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) #define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */ #define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 #define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) #define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */ #define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 #define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, 
I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) #define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 #define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) #define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */ #define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 #define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT) #define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */ #define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 #define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) #define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 #define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT) #define I40E_PRTPM_GC_RATD_SHIFT 2 #define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT) #define I40E_PRTPM_GC_LCDMP_SHIFT 3 #define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT) #define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 #define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) #define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */ #define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 #define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT) #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 #define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) #define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GL_PRS_FVBM_MAX_INDEX 3 #define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0 #define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT) #define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8 #define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT) #define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31 #define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT) #define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 #define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) #define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */ #define I40E_GLRPB_GHW_GHW_SHIFT 0 #define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT) #define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */ #define I40E_GLRPB_GLW_GLW_SHIFT 0 #define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT) #define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */ #define I40E_GLRPB_PHW_PHW_SHIFT 0 #define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT) #define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */ #define I40E_GLRPB_PLW_PLW_SHIFT 0 #define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT) #define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTRPB_DHW_MAX_INDEX 7 #define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 #define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT) #define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTRPB_DLW_MAX_INDEX 7 #define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 #define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT) #define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTRPB_DPS_MAX_INDEX 7 #define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 #define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT) #define I40E_PRTRPB_SHT(_i) (0x000AC480 + 
((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTRPB_SHT_MAX_INDEX 7 #define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 #define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT) #define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */ #define I40E_PRTRPB_SHW_SHW_SHIFT 0 #define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT) #define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTRPB_SLT_MAX_INDEX 7 #define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 #define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT) #define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */ #define I40E_PRTRPB_SLW_SLW_SHIFT 0 #define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT) #define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */ #define I40E_PRTRPB_SPS_SPS_SHIFT 0 #define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT) #define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */ #define I40E_GLQF_CTL_HTOEP_SHIFT 1 #define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT) #define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 #define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) #define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 #define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) #define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6 #define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT) #define I40E_GLQF_CTL_RSVD_SHIFT 7 #define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT) #define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 #define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT) #define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 #define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT) #define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 #define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT) #define I40E_GLQF_CTL_FDBEST_SHIFT 17 #define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT) #define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 #define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT) #define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 #define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT) #define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 #define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT) #define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */ #define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) #define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13 #define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) #define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ #define I40E_GLQF_HKEY_MAX_INDEX 12 #define I40E_GLQF_HKEY_KEY_0_SHIFT 0 #define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT) #define I40E_GLQF_HKEY_KEY_1_SHIFT 8 #define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT) #define I40E_GLQF_HKEY_KEY_2_SHIFT 16 #define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT) #define I40E_GLQF_HKEY_KEY_3_SHIFT 24 #define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT) #define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ #define I40E_GLQF_HSYM_MAX_INDEX 63 #define 
I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 #define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT) #define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */ #define I40E_GLQF_PCNT_MAX_INDEX 511 #define I40E_GLQF_PCNT_PCNT_SHIFT 0 #define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT) #define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ #define I40E_GLQF_SWAP_MAX_INDEX 1 #define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 #define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) #define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 #define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) #define I40E_GLQF_SWAP_FLEN0_SHIFT 12 #define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT) #define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 #define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) #define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 #define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) #define I40E_GLQF_SWAP_FLEN1_SHIFT 28 #define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT) #define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */ #define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 #define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT) #define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5 #define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT) #define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10 #define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) #define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14 #define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) #define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16 #define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) #define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17 #define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT) #define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18 #define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) #define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 #define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) #define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 #define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) #define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 #define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) #define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */ #define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 #define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) #define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */ #define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 #define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT) #define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 #define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT) #define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */ #define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 #define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) #define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 #define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) #define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */ #define I40E_PFQF_HENA_MAX_INDEX 1 
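/*
 * Editor's sketch: the I40E_PFQF_FDSTAT register defined above packs
 * two flow-director filter counters into one word; the GUARANT_CNT and
 * BEST_CNT masks split it into the guaranteed and best-effort counts.
 * Assumes the rd32() accessor from i40e_osdep.h; illustrative only,
 * not part of this change.
 */
static void
sketch_fd_filter_usage(struct i40e_hw *hw, u32 *guaranteed, u32 *best_effort)
{
	u32 fdstat = rd32(hw, I40E_PFQF_FDSTAT);

	*guaranteed = (fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
	    I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT;
	*best_effort = (fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
	    I40E_PFQF_FDSTAT_BEST_CNT_SHIFT;
}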
#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 #define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT) #define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */ #define I40E_PFQF_HKEY_MAX_INDEX 12 #define I40E_PFQF_HKEY_KEY_0_SHIFT 0 #define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT) #define I40E_PFQF_HKEY_KEY_1_SHIFT 8 #define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT) #define I40E_PFQF_HKEY_KEY_2_SHIFT 16 #define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT) #define I40E_PFQF_HKEY_KEY_3_SHIFT 24 #define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT) #define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_PFQF_HLUT_MAX_INDEX 127 #define I40E_PFQF_HLUT_LUT0_SHIFT 0 #define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT) #define I40E_PFQF_HLUT_LUT1_SHIFT 8 #define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT) #define I40E_PFQF_HLUT_LUT2_SHIFT 16 #define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT) #define I40E_PFQF_HLUT_LUT3_SHIFT 24 #define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT) #define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */ #define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 #define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) #define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */ #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 #define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) #define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ #define I40E_PRTQF_FD_INSET_MAX_INDEX 63 #define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 #define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) #define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ #define I40E_PRTQF_FD_MSK_MAX_INDEX 63 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 #define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT) #define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 #define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT) #define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */ #define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) #define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 #define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 #define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) #define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */ #define I40E_VFQF_HENA1_MAX_INDEX 1 #define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 #define I40E_VFQF_HENA1_PTYPE_ENA_MASK
#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HKEY1_MAX_INDEX 12
#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HLUT1_MAX_INDEX 15
#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HREGION1_MAX_INDEX 7
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VPQF_CTL_MAX_INDEX 127
#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
#define I40E_VSIQF_CTL_MAX_INDEX 383
#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
#define I40E_VSIQF_TCREGION_MAX_INDEX 3
#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOECRC_MAX_INDEX 143
#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDDPC_MAX_INDEX 143
#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOELAST_MAX_INDEX 143
#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEPRC_MAX_INDEX 143
#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOEPTC_MAX_INDEX 143
#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_FCOERPDC_MAX_INDEX 143
#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_RXERR1_L_MAX_INDEX 143
#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_RXERR2_L_MAX_INDEX 143
#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCH_MAX_INDEX 3
#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL_MAX_INDEX 3
#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH_MAX_INDEX 3
#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCL_MAX_INDEX 3
#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_GORCH_MAX_INDEX 3
#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_GORCL_MAX_INDEX 3
#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_GOTCH_MAX_INDEX 3
#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_GOTCL_MAX_INDEX 3
#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_LDPC_MAX_INDEX 3
#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
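/*
 * Editor's illustrative sketch, not part of the original header: the GLPRT
 * octet counters above are split into a 32-bit low register and a 16-bit
 * high register, so a 48-bit value is assembled from two reads. rd32() here
 * stands in for the driver's register-read helper, and the function name is
 * an assumption for illustration only.
 */
static inline u64
i40e_read_glprt_gorc48(struct i40e_hw *hw, int port)
{
	/* low 32 bits, then the 16-bit high half shifted into place */
	u64 lo = rd32(hw, I40E_GLPRT_GORCL(port)) & I40E_GLPRT_GORCL_GORCL_MASK;
	u64 hi = (u64)(rd32(hw, I40E_GLPRT_GORCH(port)) &
	    I40E_GLPRT_GORCH_GORCH_MASK) << 32;

	return (hi | lo);
}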
#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_MLFC_MAX_INDEX 3
#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_MPRCH_MAX_INDEX 3
#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_MPRCL_MAX_INDEX 3
#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_MPTCH_MAX_INDEX 3
#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_MPTCL_MAX_INDEX 3
#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_MRFC_MAX_INDEX 3
#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC127H_MAX_INDEX 3
#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC127L_MAX_INDEX 3
#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC255H_MAX_INDEX 3
#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC255L_MAX_INDEX 3
#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC511H_MAX_INDEX 3
#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC511L_MAX_INDEX 3
#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC64H_MAX_INDEX 3
#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC64L_MAX_INDEX 3
#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC127H_MAX_INDEX 3
#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC127L_MAX_INDEX 3
#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC255H_MAX_INDEX 3
#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC255L_MAX_INDEX 3
#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC511H_MAX_INDEX 3
#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC511L_MAX_INDEX 3
#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC64H_MAX_INDEX 3
#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC64L_MAX_INDEX 3
#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_RDPC_MAX_INDEX 3
#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_RFC_MAX_INDEX 3
#define I40E_GLPRT_RFC_RFC_SHIFT 0
#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_RJC_MAX_INDEX 3
#define I40E_GLPRT_RJC_RJC_SHIFT 0
#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_RLEC_MAX_INDEX 3
#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_ROC_MAX_INDEX 3
#define I40E_GLPRT_ROC_ROC_SHIFT 0
#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_RUC_MAX_INDEX 3
#define I40E_GLPRT_RUC_RUC_SHIFT 0
#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_RUPP_MAX_INDEX 3
#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_TDOLD_MAX_INDEX 3
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPRCH_MAX_INDEX 3
#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPRCL_MAX_INDEX 3
#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPTCH_MAX_INDEX 3
#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPTCL_MAX_INDEX 3
#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_BPRCH_MAX_INDEX 15
#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_BPRCL_MAX_INDEX 15
#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_BPTCH_MAX_INDEX 15
#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_BPTCL_MAX_INDEX 15
#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_GORCH_MAX_INDEX 15
#define I40E_GLSW_GORCH_GORCH_SHIFT 0
#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_GORCL_MAX_INDEX 15
#define I40E_GLSW_GORCL_GORCL_SHIFT 0
#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_GOTCH_MAX_INDEX 15
#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_GOTCL_MAX_INDEX 15
#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_MPRCH_MAX_INDEX 15
#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_MPRCL_MAX_INDEX 15
#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_MPTCH_MAX_INDEX 15
#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_MPTCL_MAX_INDEX 15
#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_RUPP_MAX_INDEX 15
#define I40E_GLSW_RUPP_RUPP_SHIFT 0
#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_TDPC_MAX_INDEX 15
#define I40E_GLSW_TDPC_TDPC_SHIFT 0
#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_UPRCH_MAX_INDEX 15
#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_UPRCL_MAX_INDEX 15
#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_UPTCH_MAX_INDEX 15
#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLSW_UPTCL_MAX_INDEX 15
#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_BPRCH_MAX_INDEX 383
#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_BPRCL_MAX_INDEX 383
#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_BPTCH_MAX_INDEX 383
#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_BPTCL_MAX_INDEX 383
#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_GORCH_MAX_INDEX 383
#define I40E_GLV_GORCH_GORCH_SHIFT 0
#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_GORCL_MAX_INDEX 383
#define I40E_GLV_GORCL_GORCL_SHIFT 0
#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_GOTCH_MAX_INDEX 383
#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_GOTCL_MAX_INDEX 383
#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_MPRCH_MAX_INDEX 383
#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_MPRCL_MAX_INDEX 383
#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_MPTCH_MAX_INDEX 383
#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_MPTCL_MAX_INDEX 383
#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_RDPC_MAX_INDEX 383
#define I40E_GLV_RDPC_RDPC_SHIFT 0
#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_UPRCH_MAX_INDEX 383
#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_UPRCL_MAX_INDEX 383
#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_UPTCH_MAX_INDEX 383
#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_UPTCL_MAX_INDEX 383
#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
#define I40E_GL_MDET_RX_EVENT_SHIFT 8
#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
#define I40E_GL_MDET_RX_VALID_SHIFT 31
#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
#define I40E_GL_MDET_TX_EVENT_SHIFT 25
#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
#define I40E_GL_MDET_TX_VALID_SHIFT 31
#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
#define I40E_PF_MDET_RX_VALID_SHIFT 0
#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
#define I40E_PF_MDET_TX_VALID_SHIFT 0
#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_TX_MAX_INDEX 127
#define I40E_VP_MDET_TX_VALID_SHIFT 0
#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
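/*
 * Editor's illustrative sketch, not part of the original header: each
 * *_SHIFT / *_MASK pair isolates one field of a register. Decoding a
 * malicious-driver-detect event from I40E_GL_MDET_TX might look like the
 * following; rd32() and the function name are again assumptions made for
 * the example.
 */
static inline bool
i40e_decode_mdet_tx(struct i40e_hw *hw, u8 *pf, u16 *vf, u16 *queue)
{
	u32 reg = rd32(hw, I40E_GL_MDET_TX);

	if (!(reg & I40E_GL_MDET_TX_VALID_MASK))
		return (false);
	*pf = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT;
	*vf = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT;
	*queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT;
	return (true);
}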
#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
#define I40E_PFPM_APM_APME_SHIFT 0
#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
#define I40E_PFPM_WUFC_LNKC_SHIFT 0
#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
#define I40E_PFPM_WUFC_MAG_SHIFT 1
#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
#define I40E_PFPM_WUFC_MNG_SHIFT 3
#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
#define I40E_PFPM_WUFC_FLX0_SHIFT 16
#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
#define I40E_PFPM_WUFC_FLX1_SHIFT 17
#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
#define I40E_PFPM_WUFC_FLX2_SHIFT 18
#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
#define I40E_PFPM_WUFC_FLX3_SHIFT 19
#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
#define I40E_PFPM_WUFC_FLX4_SHIFT 20
#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
#define I40E_PFPM_WUFC_FLX5_SHIFT 21
#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
#define I40E_PFPM_WUFC_FLX6_SHIFT 22
#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
#define I40E_PFPM_WUFC_FLX7_SHIFT 23
#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
#define I40E_PFPM_WUS_LNKC_SHIFT 0
#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
#define I40E_PFPM_WUS_MAG_SHIFT 1
#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
#define I40E_PFPM_WUS_MNG_SHIFT 3
#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
#define I40E_PFPM_WUS_FLX0_SHIFT 16
#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
#define I40E_PFPM_WUS_FLX1_SHIFT 17
#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
#define I40E_PFPM_WUS_FLX2_SHIFT 18
#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
#define I40E_PFPM_WUS_FLX3_SHIFT 19
#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
#define I40E_PFPM_WUS_FLX4_SHIFT 20
#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
#define I40E_PFPM_WUS_FLX5_SHIFT 21
#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
#define I40E_PFPM_WUS_FLX6_SHIFT 22
#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
#define I40E_PFPM_WUS_FLX7_SHIFT 23
#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
#define I40E_PRTPM_SAH_MAX_INDEX 3
#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
#define I40E_PRTPM_SAH_AV_SHIFT 31
#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
#define I40E_PRTPM_SAL_MAX_INDEX 3
#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
#define I40E_VF_ARQH1_ARQH_SHIFT 0
#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 #define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ #define I40E_VF_ARQT1_ARQT_SHIFT 0 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) #define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ #define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 #define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) #define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ #define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 #define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) #define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ #define I40E_VF_ATQH1_ATQH_SHIFT 0 #define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) #define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ #define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 #define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) #define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 #define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) #define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 #define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 #define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VF_ATQT1_ATQT_SHIFT 0 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) #define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ #define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 #define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) #define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ #define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 #define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) #define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) #define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 #define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) #define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 #define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) #define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 #define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) #define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 #define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) #define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ #define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 #define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 #define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) #define 
I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) #define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 #define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) #define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 #define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) #define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 #define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) #define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 #define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) #define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ #define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 #define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) #define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 #define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) #define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 #define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) #define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ #define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 #define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) #define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 #define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) #define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 #define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) #define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 #define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) #define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 #define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) #define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 #define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) #define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 #define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) #define I40E_VFINT_ICR01_SWINT_SHIFT 31 #define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) #define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ #define I40E_VFINT_ITR01_MAX_INDEX 2 #define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 #define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) #define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) #define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_QRX_TAIL1_MAX_INDEX 15 #define I40E_QRX_TAIL1_TAIL_SHIFT 0 #define 
I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) #define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ #define I40E_QTX_TAIL1_MAX_INDEX 15 #define I40E_QTX_TAIL1_TAIL_SHIFT 0 #define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) #define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ #define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 #define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) #define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ #define I40E_VFMSIX_TADD_MAX_INDEX 16 #define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 #define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) #define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 #define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) #define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ #define I40E_VFMSIX_TMSG_MAX_INDEX 16 #define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 #define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) #define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ #define I40E_VFMSIX_TUADD_MAX_INDEX 16 #define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 #define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) #define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ #define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 #define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 #define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) #define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ #define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 #define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) #define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 #define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) #define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 #define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) #define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ #define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 #define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) #define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 #define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) #define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 #define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) #define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 #define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) #define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 #define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) #define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ #define I40E_VFQF_HENA_MAX_INDEX 1 #define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 #define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) #define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ #define I40E_VFQF_HKEY_MAX_INDEX 12 #define I40E_VFQF_HKEY_KEY_0_SHIFT 0 #define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) #define I40E_VFQF_HKEY_KEY_1_SHIFT 8 #define I40E_VFQF_HKEY_KEY_1_MASK 
I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) #define I40E_VFQF_HKEY_KEY_2_SHIFT 16 #define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) #define I40E_VFQF_HKEY_KEY_3_SHIFT 24 #define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) #define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_VFQF_HLUT_MAX_INDEX 15 #define I40E_VFQF_HLUT_LUT0_SHIFT 0 #define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) #define I40E_VFQF_HLUT_LUT1_SHIFT 8 #define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) #define I40E_VFQF_HLUT_LUT2_SHIFT 16 #define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) #define I40E_VFQF_HLUT_LUT3_SHIFT 24 #define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) #define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_VFQF_HREGION_MAX_INDEX 7 #define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 #define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) #define I40E_VFQF_HREGION_REGION_0_SHIFT 1 #define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) #define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 #define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) #define I40E_VFQF_HREGION_REGION_1_SHIFT 5 #define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) #define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 #define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) #define I40E_VFQF_HREGION_REGION_2_SHIFT 9 #define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) #define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 #define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) #define I40E_VFQF_HREGION_REGION_3_SHIFT 13 #define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) #define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 #define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) #define I40E_VFQF_HREGION_REGION_4_SHIFT 17 #define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) #define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 #define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) #define I40E_VFQF_HREGION_REGION_5_SHIFT 21 #define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) #define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 #define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) #define I40E_VFQF_HREGION_REGION_6_SHIFT 25 #define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_VFQF_HREGION_REGION_7_SHIFT 29 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) #define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ #define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 #define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) #define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ #define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 #define I40E_MNGSB_FDCS_CRC_CONT_MASK 
I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) #define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 #define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) #define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 #define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) #define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 #define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT) #define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ #define I40E_MNGSB_FDS_START_BC_SHIFT 0 #define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) #define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 #define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) #define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 #define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 #define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) #define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 #define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 #define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) #define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ #define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 #define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) #define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 #define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) #define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 #define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) #define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ #define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 #define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) #define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 #define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) #define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 #define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) #define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ #define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 #define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) #define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 #define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) #define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 #define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) #define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ #define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 #define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) #define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 #define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) #define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 #define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) #define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 #define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) #define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 #define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, 
I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) #define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 #define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 #define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT) #define I40E_GL_FWSTS_FWROWD_SHIFT 8 #define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) #define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ #define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 #define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) #define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 #define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) #define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 #define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) #define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 #define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 #define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) #define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_CEQPART_MAX_INDEX 15 #define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 #define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) #define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 #define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) #define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ #define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 #define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) #define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_DBCQPART_MAX_INDEX 15 #define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 #define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) #define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 #define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) #define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ #define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 #define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) #define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_DBQPPART_MAX_INDEX 15 #define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 #define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) #define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 #define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) #define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 #define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 #define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) #define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 #define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 #define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) #define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ #define 
I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 #define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) #define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */ #define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 #define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) #define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PECQBASE_MAX_INDEX 15 #define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 #define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) #define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PECQCNT_MAX_INDEX 15 #define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 #define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) #define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ #define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 #define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) #define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 #define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 #define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) #define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 #define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 #define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) #define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ #define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 #define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) #define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ #define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 #define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) #define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 #define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 #define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) #define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 #define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 #define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) #define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ #define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 #define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) #define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ #define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 #define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) #define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 #define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 #define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) #define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 #define 
I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 #define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) #define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */ #define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 #define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) #define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ #define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 #define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) #define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 #define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 #define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) #define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 #define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 #define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) #define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 #define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 #define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) #define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ #define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 #define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) #define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ #define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 #define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) #define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ #define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 #define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) #define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 #define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 #define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) #define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 #define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 #define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) #define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ #define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 #define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) #define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 #define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 #define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) #define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 #define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 #define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) #define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ #define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 #define 
I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) #define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */ #define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 #define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) #define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 #define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 #define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) #define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 #define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 #define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) #define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ #define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 #define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) #define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ #define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 #define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) #define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 #define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 #define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) #define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 #define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 #define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) #define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 #define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 #define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) #define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ #define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 #define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) #define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ #define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 #define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) #define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ #define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 #define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) #define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 #define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 #define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) #define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 #define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) #define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 #define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 #define 
I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) #define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 #define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 #define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) #define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 #define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) #define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 #define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 #define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) #define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 #define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) #define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 #define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 #define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) #define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 #define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) #define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 #define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 #define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) #define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 #define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 #define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) #define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPDINV_MAX_INDEX 31 #define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 #define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) #define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 #define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) #define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 #define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) #define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 #define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) #define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 #define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) #define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 #define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) #define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* 
_i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 #define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) #define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 #define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) #define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 #define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) #define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 #define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) #define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 #define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) #define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 #define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) #define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 #define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) #define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 #define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) #define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 #define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 #define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) #define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 #define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) #define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 #define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) #define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 #define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) #define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* 
_i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 #define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) #define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 #define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) #define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 #define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) #define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 #define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) #define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 #define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) #define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 #define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 #define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) #define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 #define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 #define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) #define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ #define I40E_GLHMC_VFSDPART_MAX_INDEX 31 #define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 #define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) #define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 #define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) #define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ #define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 #define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) #define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 #define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) #define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 #define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) #define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ #define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 #define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) #define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 #define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) #define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 #define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) #define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */ #define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 
#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) #define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 #define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) #define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 #define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) #define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 #define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) #define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 #define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) #define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ #define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 #define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) #define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ #define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 #define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) #define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ #define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 #define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) #define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ #define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 #define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 #define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 #define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 #define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 #define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 #define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) #define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 #define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) #define 
I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 #define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) #define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 #define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) #define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 #define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) #define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ #define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 #define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) #define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ #define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 #define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) #define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 #define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) #define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 #define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) #define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 #define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) #define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 #define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) #define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ #define I40E_VPLAN_QBASE_MAX_INDEX 127 #define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 #define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) #define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 #define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) #define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 #define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) #define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ #define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 #define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) #define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ #define I40E_GLNVM_AL_REQ_POR_SHIFT 0 #define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) #define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 #define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) #define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 #define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) #define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 #define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) #define I40E_GLNVM_AL_REQ_PE_SHIFT 4 #define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) #define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 #define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) #define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ #define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 #define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK 
I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT) #define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12 #define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT) #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ #define I40E_GLNVM_FLA_LOCKED_SHIFT 6 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ #define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0 #define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT) #define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1 #define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT) #define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3 #define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT) #define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4 #define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT) #define I40E_GLNVM_ULD_POR_DONE_SHIFT 5 #define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT) #define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8 #define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT) #define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9 #define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT) #define I40E_GLNVM_ULD_PE_DONE_SHIFT 10 #define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT) #define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */ #define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0 #define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT) #define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1 #define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT) #define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2 #define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT) #define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3 #define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT) #define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4 #define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT) #define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5 #define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT) #define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6 #define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT) #define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7 #define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT) #define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8 #define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT) #define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9 #define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT) #define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10 #define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT) #define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */ #define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0 #define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT) #define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1 #define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT) #define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2 #define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, 
I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
#define I40E_MNGSB_DADD_ADDR_SHIFT 0
#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
#define I40E_MNGSB_RDATA_DATA_SHIFT 0
#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
#define I40E_MNGSB_RHDR0_EH_SHIFT 31
#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
#define I40E_MNGSB_WDATA_DATA_SHIFT 0
#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
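/*
 * Illustrative sketch, not part of the vendor header: parameterized macros
 * such as I40E_GLPE_PFAEQEDROPCNT(_i) above compute a per-instance address
 * from a base plus a stride, with the valid range given by the _i=0...N
 * comment and the matching _MAX_INDEX define. Assuming rd32() stands for
 * this driver's usual register-read helper, a loop can poll every instance:
 *
 *	int i;
 *	u32 drops;
 *
 *	for (i = 0; i <= I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX; i++)
 *		drops = rd32(hw, I40E_GLPE_PFAEQEDROPCNT(i)) &
 *		    I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK;
 */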
#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
#define I40E_PFPE_CQACK_PECQID_SHIFT 0
#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
#define I40E_PFPE_CQARM_PECQID_SHIFT 0
#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
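/*
 * Illustrative sketch, not part of the vendor header: every register field
 * in this file is described by a _SHIFT/_MASK pair, where I40E_MASK(value,
 * shift) simply shifts the field mask into position. A caller isolates a
 * field by ANDing the register word with the mask and shifting back down;
 * rd32() is assumed to be the driver's register-read helper:
 *
 *	u32 status = rd32(hw, I40E_PFPE_CCQPSTATUS);
 *	u32 done = (status & I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK) >>
 *	    I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT;
 */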
#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_CQACK_MAX_INDEX 127
#define I40E_VFPE_CQACK_PECQID_SHIFT 0
#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_CQARM_MAX_INDEX 127
#define I40E_VFPE_CQARM_PECQID_SHIFT 0
#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_CQPDB_MAX_INDEX 127
#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF,
I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) #define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 #define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) #define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 #define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) #define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 #define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) #define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 #define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) #define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 #define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) #define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 #define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) #define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 #define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) #define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 #define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) #define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 #define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) #define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 #define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) #define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 #define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, 
I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) #define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 #define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) #define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 #define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) #define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 #define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) #define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 #define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) #define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 #define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) #define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 #define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) #define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 #define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) #define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 #define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) #define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 #define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) #define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 #define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 #define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) #define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 #define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, 
I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) #define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 #define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) #define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 #define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) #define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 #define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) #define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 #define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) #define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 #define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) #define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 #define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) #define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 #define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) #define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 #define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) #define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 #define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) #define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 #define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) #define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 #define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 #define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, 
I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) #define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 #define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 #define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) #define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 #define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 #define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) #define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 #define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 #define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) #define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 #define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 #define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) #define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 #define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 #define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) #define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 #define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 #define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) #define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 #define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 #define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) #define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 #define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 #define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) #define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 #define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 #define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) #define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 #define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 #define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) #define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 #define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 #define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) #define 
I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 #define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 #define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) #define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 #define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 #define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) #define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 #define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 #define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) #define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 #define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 #define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) #define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 #define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 #define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) #define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 #define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 #define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) #define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 #define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 #define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) #define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 #define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 #define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) #define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 #define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 #define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) #define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 #define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 #define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) #define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 #define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 #define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) #define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 #define 
I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 #define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) #define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 #define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 #define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) #define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 #define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) #define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 #define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 #define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) #define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ #define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 #define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) #define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */ #define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0 #define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT) #define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */ #define I40E_GLQF_APBVT_MAX_INDEX 2047 #define I40E_GLQF_APBVT_APBVT_SHIFT 0 #define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT) #define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */ #define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 #define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 #define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) #define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ #define I40E_GLQF_FD_MSK_MAX_INDEX 1 #define I40E_GLQF_FD_MSK_MASK_SHIFT 0 #define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT) #define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16 #define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT) #define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ #define I40E_GLQF_HASH_INSET_MAX_INDEX 1 #define I40E_GLQF_HASH_INSET_INSET_SHIFT 0 #define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT) #define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ #define I40E_GLQF_HASH_MSK_MAX_INDEX 1 #define I40E_GLQF_HASH_MSK_MASK_SHIFT 0 #define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT) #define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16 #define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT) #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ #define I40E_GLQF_ORT_MAX_INDEX 63 #define I40E_GLQF_ORT_PIT_INDX_SHIFT 0 #define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT) #define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5 #define 
I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT) #define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 #define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) #define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */ #define I40E_GLQF_PIT_MAX_INDEX 23 #define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0 #define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT) #define I40E_GLQF_PIT_FSIZE_SHIFT 5 #define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT) #define I40E_GLQF_PIT_DEST_OFF_SHIFT 10 #define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT) #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ #define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 #define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 #define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT) #define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */ #define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0 #define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT) #define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8 #define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT) #define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */ #define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0 #define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT) #define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5 #define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT) /* Redefined for X722 family */ #define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_X722_PFQF_HLUT_MAX_INDEX 127 #define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0 #define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT) #define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8 #define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT) #define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16 #define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT) #define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24 #define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT) #define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */ #define I40E_PFQF_HREGION_MAX_INDEX 7 #define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 #define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) #define I40E_PFQF_HREGION_REGION_0_SHIFT 1 #define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT) #define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 #define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) #define I40E_PFQF_HREGION_REGION_1_SHIFT 5 #define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT) #define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 #define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) #define I40E_PFQF_HREGION_REGION_2_SHIFT 9 #define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT) #define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 #define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) #define I40E_PFQF_HREGION_REGION_3_SHIFT 13 #define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, 
I40E_PFQF_HREGION_REGION_3_SHIFT) #define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 #define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) #define I40E_PFQF_HREGION_REGION_4_SHIFT 17 #define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT) #define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 #define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) #define I40E_PFQF_HREGION_REGION_5_SHIFT 21 #define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT) #define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 #define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) #define I40E_PFQF_HREGION_REGION_6_SHIFT 25 #define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT) #define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 #define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_PFQF_HREGION_REGION_7_SHIFT 29 #define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT) #define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8 #define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT) #define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */ #define I40E_VSIQF_HKEY_MAX_INDEX 12 #define I40E_VSIQF_HKEY_KEY_0_SHIFT 0 #define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT) #define I40E_VSIQF_HKEY_KEY_1_SHIFT 8 #define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT) #define I40E_VSIQF_HKEY_KEY_2_SHIFT 16 #define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT) #define I40E_VSIQF_HKEY_KEY_3_SHIFT 24 #define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT) #define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */ #define I40E_VSIQF_HLUT_MAX_INDEX 15 #define I40E_VSIQF_HLUT_LUT0_SHIFT 0 #define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT) #define I40E_VSIQF_HLUT_LUT1_SHIFT 8 #define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT) #define I40E_VSIQF_HLUT_LUT2_SHIFT 16 #define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT) #define I40E_VSIQF_HLUT_LUT3_SHIFT 24 #define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT) #define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ #define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0 #define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT) #define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ #define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 #define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) #define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 #define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) #define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 #define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) #define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ #define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 #define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) #define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ #define 
I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 #define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) #define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ #define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 #define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) #define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ #define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 #define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) #define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 #define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) #define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 #define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) #define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 #define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) #define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ #define I40E_VFPE_CQACK1_PECQID_SHIFT 0 #define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) #define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ #define I40E_VFPE_CQARM1_PECQID_SHIFT 0 #define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) #define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ #define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 #define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) #define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ #define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 #define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) #define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 #define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) #define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ #define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 #define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) #define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 #define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) #define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ #define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 #define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) #define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 #define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) #define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ #define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 #define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) #define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ #define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 #define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) #define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ #define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 #define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) #define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ #define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 #define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) #define 
I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 #define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) #endif /* _I40E_REGISTER_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_status.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_status.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_status.h (revision 349181) @@ -1,109 +1,109 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_STATUS_H_ #define _I40E_STATUS_H_ /* Error Codes */ enum i40e_status_code { I40E_SUCCESS = 0, I40E_ERR_NVM = -1, I40E_ERR_NVM_CHECKSUM = -2, I40E_ERR_PHY = -3, I40E_ERR_CONFIG = -4, I40E_ERR_PARAM = -5, I40E_ERR_MAC_TYPE = -6, I40E_ERR_UNKNOWN_PHY = -7, I40E_ERR_LINK_SETUP = -8, I40E_ERR_ADAPTER_STOPPED = -9, I40E_ERR_INVALID_MAC_ADDR = -10, I40E_ERR_DEVICE_NOT_SUPPORTED = -11, I40E_ERR_MASTER_REQUESTS_PENDING = -12, I40E_ERR_INVALID_LINK_SETTINGS = -13, I40E_ERR_AUTONEG_NOT_COMPLETE = -14, I40E_ERR_RESET_FAILED = -15, I40E_ERR_SWFW_SYNC = -16, I40E_ERR_NO_AVAILABLE_VSI = -17, I40E_ERR_NO_MEMORY = -18, I40E_ERR_BAD_PTR = -19, I40E_ERR_RING_FULL = -20, I40E_ERR_INVALID_PD_ID = -21, I40E_ERR_INVALID_QP_ID = -22, I40E_ERR_INVALID_CQ_ID = -23, I40E_ERR_INVALID_CEQ_ID = -24, I40E_ERR_INVALID_AEQ_ID = -25, I40E_ERR_INVALID_SIZE = -26, I40E_ERR_INVALID_ARP_INDEX = -27, I40E_ERR_INVALID_FPM_FUNC_ID = -28, I40E_ERR_QP_INVALID_MSG_SIZE = -29, I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, I40E_ERR_INVALID_FRAG_COUNT = -31, I40E_ERR_QUEUE_EMPTY = -32, I40E_ERR_INVALID_ALIGNMENT = -33, I40E_ERR_FLUSHED_QUEUE = -34, I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, I40E_ERR_INVALID_IMM_DATA_SIZE = -36, I40E_ERR_TIMEOUT = -37, I40E_ERR_OPCODE_MISMATCH = -38, I40E_ERR_CQP_COMPL_ERROR = -39, I40E_ERR_INVALID_VF_ID = -40, I40E_ERR_INVALID_HMCFN_ID = -41, I40E_ERR_BACKING_PAGE_ERROR = -42, I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, I40E_ERR_INVALID_PBLE_INDEX = -44, I40E_ERR_INVALID_SD_INDEX = -45, I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, I40E_ERR_INVALID_SD_TYPE = -47, I40E_ERR_MEMCPY_FAILED = -48, I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, I40E_ERR_SRQ_ENABLED = -52, I40E_ERR_ADMIN_QUEUE_ERROR = -53, I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, I40E_ERR_BUF_TOO_SHORT = -55, I40E_ERR_ADMIN_QUEUE_FULL = -56, I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, I40E_ERR_BAD_IWARP_CQE = -58, I40E_ERR_NVM_BLANK_MODE = -59, I40E_ERR_NOT_IMPLEMENTED = -60, I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, I40E_ERR_DIAG_TEST_FAILED = -62, I40E_ERR_NOT_READY = -63, I40E_NOT_SUPPORTED = -64, I40E_ERR_FIRMWARE_API_VERSION = -65, I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, }; #endif /* _I40E_STATUS_H_ */ Index: releng/11.3/sys/dev/ixl/i40e_type.h =================================================================== --- releng/11.3/sys/dev/ixl/i40e_type.h (revision 349180) +++ releng/11.3/sys/dev/ixl/i40e_type.h (revision 349181) @@ -1,1701 +1,1737 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ #include "i40e_status.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_adminq.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" #include "i40e_devids.h" #define BIT(a) (1UL << (a)) #define BIT_ULL(a) (1ULL << (a)) #ifndef I40E_MASK /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) (mask << shift) #endif #define I40E_MAX_PF 16 #define I40E_MAX_PF_VSI 64 #define I40E_MAX_PF_QP 128 #define I40E_MAX_VSI_QP 16 -#define I40E_MAX_VF_VSI 3 +#define I40E_MAX_VF_VSI 4 #define I40E_MAX_CHAINED_RX_BUFFERS 5 #define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 /* something less than 1 minute */ #define I40E_HEARTBEAT_TIMEOUT (HZ * 50) /* Max default timeout in ms, */ #define I40E_MAX_NVM_TIMEOUT 18000 /* Max timeout in ms for the phy to respond */ #define I40E_MAX_PHY_TIMEOUT 500 /* Check whether address is multicast. */ #define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01)) /* Check whether an address is broadcast. */ #define I40E_IS_BROADCAST(address) \ ((((u8 *)(address))[0] == ((u8)0xff)) && \ (((u8 *)(address))[1] == ((u8)0xff))) /* Switch from ms to the 1usec global time (this is the GTIME resolution) */ #define I40E_MS_TO_GTIME(time) ((time) * 1000) /* forward declaration */ struct i40e_hw; typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); #define ETH_ALEN 6 /* Data type manipulation macros. */ #define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) #define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) #define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) #define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF)) #define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) #define I40E_LO_BYTE(x) ((u8)((x) & 0xFF)) /* Number of Transmit Descriptors must be a multiple of 8. */ #define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8 /* Number of Receive Descriptors must be a multiple of 32 if * the number of descriptors is greater than 32. */ #define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32 #define I40E_DESC_UNUSED(R) \ ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) /* bitfields for Tx queue mapping in QTX_CTL */ #define I40E_QTX_CTL_VF_QUEUE 0x0 #define I40E_QTX_CTL_VM_QUEUE 0x1 #define I40E_QTX_CTL_PF_QUEUE 0x2 /* debug masks - set these bits in hw->debug_mask to control output */ enum i40e_debug_mask { I40E_DEBUG_INIT = 0x00000001, I40E_DEBUG_RELEASE = 0x00000002, I40E_DEBUG_LINK = 0x00000010, I40E_DEBUG_PHY = 0x00000020, I40E_DEBUG_HMC = 0x00000040, I40E_DEBUG_NVM = 0x00000080, I40E_DEBUG_LAN = 0x00000100, I40E_DEBUG_FLOW = 0x00000200, I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, I40E_DEBUG_FD = 0x00001000, I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, I40E_DEBUG_AQ_COMMAND = 0x06000000, I40E_DEBUG_AQ = 0x0F000000, I40E_DEBUG_USER = 0xF0000000, I40E_DEBUG_ALL = 0xFFFFFFFF }; /* PCI Bus Info */ #define I40E_PCI_LINK_STATUS 0xB2 #define I40E_PCI_LINK_WIDTH 0x3F0 #define I40E_PCI_LINK_WIDTH_1 0x10 #define I40E_PCI_LINK_WIDTH_2 0x20 #define I40E_PCI_LINK_WIDTH_4 0x40 #define I40E_PCI_LINK_WIDTH_8 0x80 #define I40E_PCI_LINK_SPEED 0xF #define I40E_PCI_LINK_SPEED_2500 0x1 #define I40E_PCI_LINK_SPEED_5000 0x2 #define I40E_PCI_LINK_SPEED_8000 0x3 #define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \ I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_PHY_COM_REG_PAGE 0x1E #define I40E_PHY_LED_LINK_MODE_MASK 0xF0 #define I40E_PHY_LED_MANUAL_ON 0x100 #define I40E_PHY_LED_PROV_REG_1 0xC430 #define I40E_PHY_LED_MODE_MASK 0xFFFF #define I40E_PHY_LED_MODE_ORIG 0x80000000 /* Memory types */ enum i40e_memset_type { I40E_NONDMA_MEM = 0, I40E_DMA_MEM }; /* Memcpy types */ enum i40e_memcpy_type { I40E_NONDMA_TO_NONDMA = 0, I40E_NONDMA_TO_DMA, I40E_DMA_TO_DMA, I40E_DMA_TO_NONDMA }; /* These are structs for managing the hardware information and the operations. * The structures of function pointers are filled out at init time when we * know for sure exactly which hardware we're working with. This gives us the * flexibility of using the same main driver code but adapting to slightly * different hardware needs as new parts are developed. For this architecture, * the Firmware and AdminQ are intended to insulate the driver from most of the * future changes, but these structures will also do part of the job. 
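 *
 * For illustration (all names below are taken from this header, not new
 * API): a field is read from a 32-bit register by combining its *_MASK
 * and *_SHIFT pair, e.g.
 *
 *	pehsize = (val & I40E_PFQF_CTL_2_PEHSIZE_MASK) >>
 *		  I40E_PFQF_CTL_2_PEHSIZE_SHIFT;
 *
 * and each *HI/*LO pair of GLPES statistics registers holds one half of
 * a single counter, recovered as ((u64)hi << 32) | lo.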
*/ enum i40e_mac_type { I40E_MAC_UNKNOWN = 0, I40E_MAC_XL710, I40E_MAC_VF, I40E_MAC_X722, I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; enum i40e_media_type { I40E_MEDIA_TYPE_UNKNOWN = 0, I40E_MEDIA_TYPE_FIBER, I40E_MEDIA_TYPE_BASET, I40E_MEDIA_TYPE_BACKPLANE, I40E_MEDIA_TYPE_CX4, I40E_MEDIA_TYPE_DA, I40E_MEDIA_TYPE_VIRTUAL }; enum i40e_fc_mode { I40E_FC_NONE = 0, I40E_FC_RX_PAUSE, I40E_FC_TX_PAUSE, I40E_FC_FULL, I40E_FC_PFC, I40E_FC_DEFAULT }; enum i40e_set_fc_aq_failures { I40E_SET_FC_AQ_FAIL_NONE = 0, I40E_SET_FC_AQ_FAIL_GET = 1, I40E_SET_FC_AQ_FAIL_SET = 2, I40E_SET_FC_AQ_FAIL_UPDATE = 4, I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 }; enum i40e_vsi_type { I40E_VSI_MAIN = 0, I40E_VSI_VMDQ1 = 1, I40E_VSI_VMDQ2 = 2, I40E_VSI_CTRL = 3, I40E_VSI_FCOE = 4, I40E_VSI_MIRROR = 5, I40E_VSI_SRIOV = 6, I40E_VSI_FDIR = 7, I40E_VSI_TYPE_UNKNOWN }; enum i40e_queue_type { I40E_QUEUE_TYPE_RX = 0, I40E_QUEUE_TYPE_TX, I40E_QUEUE_TYPE_PE_CEQ, I40E_QUEUE_TYPE_UNKNOWN }; struct i40e_link_status { enum i40e_aq_phy_type phy_type; enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; u8 req_fec_info; u8 fec_info; u8 ext_info; u8 loopback; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; u8 requested_speeds; u8 module_type[3]; /* 1st byte: module identifier */ #define I40E_MODULE_TYPE_SFP 0x03 #define I40E_MODULE_TYPE_QSFP 0x0D /* 2nd byte: ethernet compliance codes for 10/40G */ #define I40E_MODULE_TYPE_40G_ACTIVE 0x01 #define I40E_MODULE_TYPE_40G_LR4 0x02 #define I40E_MODULE_TYPE_40G_SR4 0x04 #define I40E_MODULE_TYPE_40G_CR4 0x08 #define I40E_MODULE_TYPE_10G_BASE_SR 0x10 #define I40E_MODULE_TYPE_10G_BASE_LR 0x20 #define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 #define I40E_MODULE_TYPE_10G_BASE_ER 0x80 /* 3rd byte: ethernet compliance codes for 1G */ #define I40E_MODULE_TYPE_1000BASE_SX 0x01 #define I40E_MODULE_TYPE_1000BASE_LX 0x02 #define I40E_MODULE_TYPE_1000BASE_CX 0x04 #define I40E_MODULE_TYPE_1000BASE_T 0x08 }; struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; bool get_link_info; enum i40e_media_type media_type; /* all the phy types the NVM is capable of */ u64 phy_types; }; #define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) #define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) #define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) #define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) #define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) #define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) #define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) #define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) #define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) #define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) #define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) #define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) #define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) #define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) #define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) #define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) #define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) #define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) #define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) #define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU 
BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) #define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) #define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) #define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) #define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) #define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) #define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) #define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) #define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) /* * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, * a shift is needed to adjust for this with values larger than 31. The * only affected values are I40E_PHY_TYPE_25GBASE_*. */ #define I40E_PHY_TYPE_OFFSET 1 #define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \ I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \ I40E_PHY_TYPE_OFFSET) +/* Offset for 2.5G/5G PHY Types value to bit number conversion */ +#define I40E_PHY_TYPE_OFFSET2 (-10) +#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \ + I40E_PHY_TYPE_OFFSET2) +#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \ + I40E_PHY_TYPE_OFFSET2) #define I40E_HW_CAP_MAX_GPIO 30 #define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0 #define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1 enum i40e_acpi_programming_method { I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0, I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1 }; #define I40E_WOL_SUPPORT_MASK 0x1 #define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2 #define I40E_PROXY_SUPPORT_MASK 0x4 /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { u32 switch_mode; #define I40E_NVM_IMAGE_TYPE_EVB 0x0 #define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 #define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 /* Cloud filter modes: * Mode1: Filter on L4 port only * Mode2: Filter for non-tunneled traffic * Mode3: Filter for tunnel traffic */ #define I40E_CLOUD_FILTER_MODE1 0x6 #define I40E_CLOUD_FILTER_MODE2 0x7 #define I40E_CLOUD_FILTER_MODE3 0x8 u32 management_mode; u32 mng_protocols_over_mctp; #define I40E_MNG_PROTOCOL_PLDM 0x2 #define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 #define I40E_MNG_PROTOCOL_NCSI 0x8 u32 npar_enable; u32 os2bmc; u32 valid_functions; bool sr_iov_1_1; bool vmdq; bool evb_802_1_qbg; /* Edge Virtual Bridging */ bool evb_802_1_qbh; /* Bridge Port Extension */ bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ bool flex10_enable; bool flex10_capable; u32 flex10_mode; #define I40E_FLEX10_MODE_UNKNOWN 0x0 #define I40E_FLEX10_MODE_DCC 0x1 #define I40E_FLEX10_MODE_DCI 0x2 u32 flex10_status; #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 bool sec_rev_disabled; bool update_disabled; #define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 #define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 bool mgmt_cem; bool ieee_1588; bool iwarp; bool fd; 
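	/* Flow Director filter counts reported by the firmware: filters
	 * guaranteed to this function vs. best-effort (shared) capacity.
	 */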
u32 fd_filters_guaranteed; u32 fd_filters_best_effort; bool rss; u32 rss_table_size; u32 rss_table_entry_width; bool led[I40E_HW_CAP_MAX_GPIO]; bool sdp[I40E_HW_CAP_MAX_GPIO]; u32 nvm_image_type; u32 num_flow_director_filters; u32 num_vfs; u32 vf_base_id; u32 num_vsis; u32 num_rx_qp; u32 num_tx_qp; u32 base_queue; u32 num_msix_vectors; u32 num_msix_vectors_vf; u32 led_pin_num; u32 sdp_pin_num; u32 mdio_port_num; u32 mdio_port_mode; u8 rx_buf_chain_len; u32 enabled_tcmap; u32 maxtc; u64 wr_csr_prot; bool apm_wol_support; enum i40e_acpi_programming_method acpi_prog_method; bool proxy_support; }; struct i40e_mac_info { enum i40e_mac_type type; u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; u8 san_addr[ETH_ALEN]; u8 port_addr[ETH_ALEN]; u16 max_fcoeq; }; enum i40e_aq_resources_ids { I40E_NVM_RESOURCE_ID = 1 }; enum i40e_aq_resource_access_type { I40E_RESOURCE_READ = 1, I40E_RESOURCE_WRITE }; struct i40e_nvm_info { u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ u16 version; /* NVM package version */ u32 eetrack; /* NVM data version */ u32 oem_ver; /* OEM version info */ }; /* definitions used in NVM update support */ enum i40e_nvmupd_cmd { I40E_NVMUPD_INVALID, I40E_NVMUPD_READ_CON, I40E_NVMUPD_READ_SNT, I40E_NVMUPD_READ_LCB, I40E_NVMUPD_READ_SA, I40E_NVMUPD_WRITE_ERA, I40E_NVMUPD_WRITE_CON, I40E_NVMUPD_WRITE_SNT, I40E_NVMUPD_WRITE_LCB, I40E_NVMUPD_WRITE_SA, I40E_NVMUPD_CSUM_CON, I40E_NVMUPD_CSUM_SA, I40E_NVMUPD_CSUM_LCB, I40E_NVMUPD_STATUS, I40E_NVMUPD_EXEC_AQ, I40E_NVMUPD_GET_AQ_RESULT, I40E_NVMUPD_GET_AQ_EVENT, + I40E_NVMUPD_FEATURES, }; enum i40e_nvmupd_state { I40E_NVMUPD_STATE_INIT, I40E_NVMUPD_STATE_READING, I40E_NVMUPD_STATE_WRITING, I40E_NVMUPD_STATE_INIT_WAIT, I40E_NVMUPD_STATE_WRITE_WAIT, I40E_NVMUPD_STATE_ERROR }; /* nvm_access definition and its masks/shifts need to be accessible to * application, core driver, and shared code. Where is the right file? 
*/ #define I40E_NVM_READ 0xB #define I40E_NVM_WRITE 0xC #define I40E_NVM_MOD_PNT_MASK 0xFF #define I40E_NVM_TRANS_SHIFT 8 #define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) #define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 #define I40E_NVM_PRESERVATION_FLAGS_MASK \ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) #define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 #define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 #define I40E_NVM_CON 0x0 #define I40E_NVM_SNT 0x1 #define I40E_NVM_LCB 0x2 #define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) #define I40E_NVM_ERA 0x4 #define I40E_NVM_CSUM 0x8 #define I40E_NVM_AQE 0xe #define I40E_NVM_EXEC 0xf +#define I40E_NVM_EXEC_GET_AQ_RESULT 0x0 +#define I40E_NVM_EXEC_FEATURES 0xe +#define I40E_NVM_EXEC_STATUS 0xf + #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT) #define I40E_NVMUPD_MAX_DATA 4096 #define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ struct i40e_nvm_access { u32 command; u32 config; u32 offset; /* in bytes */ u32 data_size; /* in bytes */ u8 data[1]; }; +/* NVMUpdate features API */ +#define I40E_NVMUPD_FEATURES_API_VER_MAJOR 0 +#define I40E_NVMUPD_FEATURES_API_VER_MINOR 14 +#define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12 + +#define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0) + +struct i40e_nvmupd_features { + u8 major; + u8 minor; + u16 size; + u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN]; +}; + /* (Q)SFP module access definitions */ #define I40E_I2C_EEPROM_DEV_ADDR 0xA0 #define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 #define I40E_MODULE_TYPE_ADDR 0x00 #define I40E_MODULE_REVISION_ADDR 0x01 #define I40E_MODULE_SFF_8472_COMP 0x5E #define I40E_MODULE_SFF_8472_SWAP 0x5C #define I40E_MODULE_SFF_ADDR_MODE 0x04 #define I40E_MODULE_SFF_DIAG_CAPAB 0x40 #define I40E_MODULE_TYPE_QSFP_PLUS 0x0D #define I40E_MODULE_TYPE_QSFP28 0x11 #define I40E_MODULE_QSFP_MAX_LEN 640 /* PCI bus types */ enum i40e_bus_type { i40e_bus_type_unknown = 0, i40e_bus_type_pci, i40e_bus_type_pcix, i40e_bus_type_pci_express, i40e_bus_type_reserved }; /* PCI bus speeds */ enum i40e_bus_speed { i40e_bus_speed_unknown = 0, i40e_bus_speed_33 = 33, i40e_bus_speed_66 = 66, i40e_bus_speed_100 = 100, i40e_bus_speed_120 = 120, i40e_bus_speed_133 = 133, i40e_bus_speed_2500 = 2500, i40e_bus_speed_5000 = 5000, i40e_bus_speed_8000 = 8000, i40e_bus_speed_reserved }; /* PCI bus widths */ enum i40e_bus_width { i40e_bus_width_unknown = 0, i40e_bus_width_pcie_x1 = 1, i40e_bus_width_pcie_x2 = 2, i40e_bus_width_pcie_x4 = 4, i40e_bus_width_pcie_x8 = 8, i40e_bus_width_32 = 32, i40e_bus_width_64 = 64, i40e_bus_width_reserved }; /* Bus parameters */ struct i40e_bus_info { enum i40e_bus_speed speed; enum i40e_bus_width width; enum i40e_bus_type type; u16 func; u16 device; u16 lan_id; u16 bus_id; }; /* Flow control (FC) parameters */ struct i40e_fc_info { enum i40e_fc_mode current_mode; /* FC mode in effect */ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ }; #define I40E_MAX_TRAFFIC_CLASS 8 #define I40E_MAX_USER_PRIORITY 8 #define I40E_DCBX_MAX_APPS 32 #define I40E_LLDPDU_SIZE 1500 #define I40E_TLV_STATUS_OPER 0x1 #define I40E_TLV_STATUS_SYNC 0x2 #define I40E_TLV_STATUS_ERR 0x4 #define I40E_CEE_OPER_MAX_APPS 3 #define I40E_APP_PROTOID_FCOE 0x8906 #define I40E_APP_PROTOID_ISCSI 0x0cbc #define I40E_APP_PROTOID_FIP 0x8914 #define I40E_APP_SEL_ETHTYPE 0x1 #define I40E_APP_SEL_TCPIP 0x2 #define I40E_CEE_APP_SEL_ETHTYPE 0x0 #define I40E_CEE_APP_SEL_TCPIP 0x1 /* CEE or IEEE 802.1Qaz ETS Configuration data */ struct i40e_dcb_ets_config { u8 willing; 
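/*
 * Editor's sketch (not part of the original header): unpacking the
 * nvm_access config word with the masks above. For example, a config word
 * of 0x1108 decodes as module pointer 0x08 (config & I40E_NVM_MOD_PNT_MASK),
 * transaction I40E_NVM_SNT ((0x1108 & I40E_NVM_TRANS_MASK) >> 8 == 0x1),
 * and preservation I40E_NVM_PRESERVATION_FLAGS_SELECTED. The helper names
 * are hypothetical.
 */
static INLINE u32
ixl_nvm_access_trans(u32 config)
{
	return ((config & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

static INLINE u32
ixl_nvm_access_preservation(u32 config)
{
	return ((config & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
	    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}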
u8 cbs; u8 maxtcs; u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; }; /* CEE or IEEE 802.1Qaz PFC Configuration data */ struct i40e_dcb_pfc_config { u8 willing; u8 mbc; u8 pfccap; u8 pfcenable; }; /* CEE or IEEE 802.1Qaz Application Priority data */ struct i40e_dcb_app_priority_table { u8 priority; u8 selector; u16 protocolid; }; struct i40e_dcbx_config { u8 dcbx_mode; #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 u8 app_mode; #define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; struct i40e_dcb_ets_config etsrec; struct i40e_dcb_pfc_config pfc; struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; }; /* Port hardware description */ struct i40e_hw { u8 *hw_addr; void *back; /* subsystem structs */ struct i40e_phy_info phy; struct i40e_mac_info mac; struct i40e_bus_info bus; struct i40e_nvm_info nvm; struct i40e_fc_info fc; /* pci info */ u16 device_id; u16 vendor_id; u16 subsystem_device_id; u16 subsystem_vendor_id; u8 revision_id; u8 port; bool adapter_stopped; /* capabilities for entire device and PCI func */ struct i40e_hw_capabilities dev_caps; struct i40e_hw_capabilities func_caps; /* Flow Director shared filter space */ u16 fdir_shared_filter_count; /* device profile info */ u8 pf_id; u16 main_vsi_seid; /* for multi-function MACs */ u16 partition_id; u16 num_partitions; u16 num_ports; /* Closest numa node to the device */ u16 numa_node; /* Admin Queue info */ struct i40e_adminq_info aq; /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_aq_desc nvm_aq_event_desc; struct i40e_virt_mem nvm_buff; bool nvm_release_on_done; u16 nvm_wait_opcode; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ /* LLDP/DCBX Status */ u16 dcbx_status; /* DCBX info */ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ /* WoL and proxy support */ u16 num_wol_proxy_filters; u16 wol_proxy_vsi_seid; #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) #define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) +#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) +#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) u64 flags; /* Used in set switch config AQ command */ u16 switch_tag; u16 first_tag; u16 second_tag; + /* NVMUpdate features */ + struct i40e_nvmupd_features nvmupd_features; + /* debug mask */ u32 debug_mask; char err_str[16]; }; static INLINE bool i40e_is_vf(struct i40e_hw *hw) { return (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF); } struct i40e_driver_version { u8 major_version; u8 minor_version; u8 build_version; u8 subbuild_version; u8 driver_string[32]; }; /* RX Descriptors */ union i40e_16byte_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { struct { union { __le16 mirroring_status; __le16 fcoe_ctx_id; } mirr_fcoe; __le16 l2tag1; } lo_dword; union { __le32 rss; /* RSS Hash */ __le32 fd_id; /* Flow director filter id */ __le32 fcoe_param; /* FCoE DDP Context id */ } hi_dword; } qword0; struct { /* ext status/error/pktype/length */ __le64 status_error_len; } qword1; } wb; /* writeback */ }; union i40e_32byte_rx_desc { 
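/*
 * Editor's sketch (not part of the original header): gating LLDP agent
 * control on the new I40E_HW_FLAG_FW_LLDP_* bits in hw->flags above. The
 * four-argument i40e_aq_stop_lldp(hw, shutdown_agent, persist, cmd_details)
 * form matches its use later in this patch; the helper itself is
 * hypothetical.
 */
static enum i40e_status_code
ixl_try_stop_fw_lldp(struct i40e_hw *hw)
{
	bool persist;

	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
		return (I40E_ERR_DEVICE_NOT_SUPPORTED);
	/* Only ask firmware to keep the agent stopped across resets
	 * when it advertises persistent LLDP support. */
	persist = (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) != 0;
	return (i40e_aq_stop_lldp(hw, true, persist, NULL));
}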
struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ /* bit 0 of hdr_buffer_addr is DD bit */ __le64 rsvd1; __le64 rsvd2; } read; struct { struct { struct { union { __le16 mirroring_status; __le16 fcoe_ctx_id; } mirr_fcoe; __le16 l2tag1; } lo_dword; union { __le32 rss; /* RSS Hash */ __le32 fcoe_param; /* FCoE DDP Context id */ /* Flow director filter id in case of * Programming status desc WB */ __le32 fd_id; } hi_dword; } qword0; struct { /* status/error/pktype/length */ __le64 status_error_len; } qword1; struct { __le16 ext_status; /* extended status */ __le16 rsvd; __le16 l2tag2_1; __le16 l2tag2_2; } qword2; struct { union { __le32 flex_bytes_lo; __le32 pe_status; } lo_dword; union { __le32 flex_bytes_hi; __le32 fd_id; } hi_dword; } qword3; } wb; /* writeback */ }; #define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8 #define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \ I40E_RXD_QW0_MIRROR_STATUS_SHIFT) #define I40E_RXD_QW0_FCOEINDX_SHIFT 0 #define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \ I40E_RXD_QW0_FCOEINDX_SHIFT) enum i40e_rx_desc_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_STATUS_DD_SHIFT = 0, I40E_RX_DESC_STATUS_EOF_SHIFT = 1, I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; #define I40E_RXD_QW1_STATUS_SHIFT 0 #define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \ I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT #define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) #define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST_SHIFT #define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \ I40E_RXD_QW1_STATUS_UMBCAST_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc?
FD_ID : RSV */ I40E_RX_DESC_FLTSTAT_RSV = 2, I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, }; #define I40E_RXD_PACKET_TYPE_UNICAST 0 #define I40E_RXD_PACKET_TYPE_MULTICAST 1 #define I40E_RXD_PACKET_TYPE_BROADCAST 2 #define I40E_RXD_PACKET_TYPE_MIRRORED 3 #define I40E_RXD_QW1_ERROR_SHIFT 19 #define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) enum i40e_rx_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_ERROR_RXE_SHIFT = 0, I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, I40E_RX_DESC_ERROR_HBO_SHIFT = 2, I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ I40E_RX_DESC_ERROR_IPE_SHIFT = 3, I40E_RX_DESC_ERROR_L4E_SHIFT = 4, I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 }; enum i40e_rx_desc_error_l3l4e_fcoe_masks { I40E_RX_DESC_ERROR_L3L4E_NONE = 0, I40E_RX_DESC_ERROR_L3L4E_PROT = 1, I40E_RX_DESC_ERROR_L3L4E_FC = 2, I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 }; #define I40E_RXD_QW1_PTYPE_SHIFT 30 #define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) /* Packet type non-ip values */ enum i40e_rx_l2_ptype { I40E_RX_PTYPE_L2_RESERVED = 0, I40E_RX_PTYPE_L2_MAC_PAY2 = 1, I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, I40E_RX_PTYPE_L2_FIP_PAY2 = 3, I40E_RX_PTYPE_L2_OUI_PAY2 = 4, I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, I40E_RX_PTYPE_L2_ECP_PAY2 = 7, I40E_RX_PTYPE_L2_EVB_PAY2 = 8, I40E_RX_PTYPE_L2_QCN_PAY2 = 9, I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, I40E_RX_PTYPE_L2_ARP = 11, I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 }; struct i40e_rx_ptype_decoded { u32 ptype:8; u32 known:1; u32 outer_ip:1; u32 outer_ip_ver:1; u32 outer_frag:1; u32 tunnel_type:3; u32 tunnel_end_prot:2; u32 tunnel_end_frag:1; u32 inner_prot:4; u32 payload_layer:3; }; enum i40e_rx_ptype_outer_ip { I40E_RX_PTYPE_OUTER_L2 = 0, I40E_RX_PTYPE_OUTER_IP = 1 }; enum i40e_rx_ptype_outer_ip_ver { I40E_RX_PTYPE_OUTER_NONE = 0, I40E_RX_PTYPE_OUTER_IPV4 = 0, I40E_RX_PTYPE_OUTER_IPV6 = 1 }; enum i40e_rx_ptype_outer_fragmented { I40E_RX_PTYPE_NOT_FRAG = 0, I40E_RX_PTYPE_FRAG = 1 }; enum i40e_rx_ptype_tunnel_type { I40E_RX_PTYPE_TUNNEL_NONE = 0, I40E_RX_PTYPE_TUNNEL_IP_IP = 1, I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, }; enum i40e_rx_ptype_tunnel_end_prot { I40E_RX_PTYPE_TUNNEL_END_NONE = 0, I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, }; enum i40e_rx_ptype_inner_prot { I40E_RX_PTYPE_INNER_PROT_NONE = 0, I40E_RX_PTYPE_INNER_PROT_UDP = 1, I40E_RX_PTYPE_INNER_PROT_TCP = 2, I40E_RX_PTYPE_INNER_PROT_SCTP = 3, I40E_RX_PTYPE_INNER_PROT_ICMP = 4, I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 }; enum i40e_rx_ptype_payload_layer { I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, }; #define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF #define I40E_RX_PTYPE_SHIFT 56 #define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define 
I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 #define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) #define I40E_RXD_QW1_NEXTP_SHIFT 38 #define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT) #define I40E_RXD_QW2_EXT_STATUS_SHIFT 0 #define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \ I40E_RXD_QW2_EXT_STATUS_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, }; #define I40E_RXD_QW2_L2TAG2_SHIFT 0 #define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT) #define I40E_RXD_QW2_L2TAG3_SHIFT 16 #define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT) enum i40e_rx_desc_pe_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 }; #define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 #define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) #define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0 #define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \ I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT) #define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 #define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) enum i40e_rx_prog_status_desc_status_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ }; enum i40e_rx_prog_status_desc_prog_id_masks { I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, }; enum i40e_rx_prog_status_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 }; #define I40E_TWO_BIT_MASK 0x3 #define I40E_THREE_BIT_MASK 0x7 #define I40E_FOUR_BIT_MASK 0xF #define I40E_EIGHTEEN_BIT_MASK 0x3FFFF /* TX Descriptor */ struct i40e_tx_desc { __le64 buffer_addr; /* Address of descriptor's data buf */ __le64 cmd_type_offset_bsz; }; #define I40E_TXD_QW1_DTYPE_SHIFT 0 #define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) enum i40e_tx_desc_dtype_value { I40E_TX_DESC_DTYPE_DATA = 0x0, I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ I40E_TX_DESC_DTYPE_CONTEXT = 0x1, I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, 
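/*
 * Editor's sketch (not part of the original header): unpacking the RX
 * write-back qword1 using the I40E_RXD_QW1_* masks defined above. Per those
 * definitions, status occupies bits [18:0], error [26:19], ptype [37:30],
 * and the packet buffer length [51:38]. The helper is hypothetical.
 */
static INLINE void
ixl_decode_rx_qw1(u64 qword, u32 *status, u32 *error, u32 *ptype, u32 *plen)
{
	*status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
	    I40E_RXD_QW1_STATUS_SHIFT;
	*error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
	    I40E_RXD_QW1_ERROR_SHIFT;
	*ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
	    I40E_RXD_QW1_PTYPE_SHIFT;
	*plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
	    I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
	/* Descriptor done is status bit I40E_RX_DESC_STATUS_DD_SHIFT. */
}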
I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, I40E_TX_DESC_DTYPE_DESC_DONE = 0xF }; #define I40E_TXD_QW1_CMD_SHIFT 4 #define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) enum i40e_tx_desc_cmd_bits { I40E_TX_DESC_CMD_EOP = 0x0001, I40E_TX_DESC_CMD_RS = 0x0002, I40E_TX_DESC_CMD_ICRC = 0x0004, I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, I40E_TX_DESC_CMD_DUMMY = 0x0010, I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ I40E_TX_DESC_CMD_FCOET = 0x0080, I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ }; #define I40E_TXD_QW1_OFFSET_SHIFT 16 #define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ I40E_TXD_QW1_OFFSET_SHIFT) enum i40e_tx_desc_length_fields { /* Note: These are predefined bit offsets */ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ }; #define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) #define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) #define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) #define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) #define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 #define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ I40E_TXD_QW1_TX_BUF_SZ_SHIFT) #define I40E_TXD_QW1_L2TAG1_SHIFT 48 #define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) /* Context descriptors */ struct i40e_tx_context_desc { __le32 tunneling_params; __le16 l2tag2; __le16 rsvd; __le64 type_cmd_tso_mss; }; #define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 #define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) #define I40E_TXD_CTX_QW1_CMD_SHIFT 4 #define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) enum i40e_tx_ctx_desc_cmd_bits { I40E_TX_CTX_DESC_TSO = 0x01, I40E_TX_CTX_DESC_TSYN = 0x02, I40E_TX_CTX_DESC_IL2TAG2 = 0x04, I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, I40E_TX_CTX_DESC_SWPE = 0x40 }; #define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 #define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) #define I40E_TXD_CTX_QW1_MSS_SHIFT 50 #define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ I40E_TXD_CTX_QW1_MSS_SHIFT) #define I40E_TXD_CTX_QW1_VSI_SHIFT 50 #define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) #define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 #define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ I40E_TXD_CTX_QW0_EXT_IP_SHIFT) enum i40e_tx_ctx_desc_eipt_offload { I40E_TX_CTX_EXT_IP_NONE = 0x0, I40E_TX_CTX_EXT_IP_IPV6 = 0x1, I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, I40E_TX_CTX_EXT_IP_IPV4 = 0x3 }; #define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 #define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define 
I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 #define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK #define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 #define I40E_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \ I40E_TXD_CTX_QW0_NATLEN_SHIFT) #define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 #define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ I40E_TXD_CTX_QW0_DECTTL_SHIFT) #define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 #define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) struct i40e_nop_desc { __le64 rsvd; __le64 dtype_cmd; }; #define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0 #define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT) #define I40E_TXD_NOP_QW1_CMD_SHIFT 4 #define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT) enum i40e_tx_nop_desc_cmd_bits { /* Note: These are predefined bit offsets */ I40E_TX_NOP_DESC_EOP_SHIFT = 0, I40E_TX_NOP_DESC_RS_SHIFT = 1, I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */ }; struct i40e_filter_program_desc { __le32 qindex_flex_ptype_vsi; __le32 rsvd; __le32 dtype_cmd_cntindex; __le32 fd_id; }; #define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 #define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ I40E_TXD_FLTR_QW0_QINDEX_SHIFT) #define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 #define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) #define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 #define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) /* Packet Classifier Types for filters */ enum i40e_filter_pctype { /* Note: Values 0-28 are reserved for future use. * Values 29, 30, and 32 are not supported on XL710 and X710. */ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, /* Note: Values 37-38 are reserved for future use. * Values 39, 40, and 42 are not supported on XL710 and X710.
*/ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, /* Note: Value 47 is reserved for future use */ I40E_FILTER_PCTYPE_FCOE_OX = 48, I40E_FILTER_PCTYPE_FCOE_RX = 49, I40E_FILTER_PCTYPE_FCOE_OTHER = 50, /* Note: Values 51-62 are reserved for future use */ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, }; enum i40e_filter_program_desc_dest { I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, }; enum i40e_filter_program_desc_fd_status { I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 #define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0 #define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) enum i40e_filter_program_desc_pcmd { I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, }; #define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) #define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) enum i40e_filter_type { I40E_FLOW_DIRECTOR_FLTR = 0, I40E_PE_QUAD_HASH_FLTR = 1, I40E_ETHERTYPE_FLTR, I40E_FCOE_CTX_FLTR, I40E_MAC_VLAN_FLTR, I40E_HASH_FLTR }; struct i40e_vsi_context { u16 seid; u16 uplink_seid; u16 vsi_number; u16 vsis_allocated; u16 vsis_unallocated; u16 flags; u8 pf_num; u8 vf_num; u8 connection_type; struct i40e_aqc_vsi_properties_data info; }; struct i40e_veb_context { u16 seid; u16 uplink_seid; u16 veb_number; u16 vebs_allocated; u16 vebs_unallocated; u16 flags; struct i40e_aqc_get_veb_parameters_completion info; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ struct i40e_eth_stats { u64 rx_bytes; /* gorc */ u64 rx_unicast; /* uprc */ u64 rx_multicast; /* mprc */ u64 rx_broadcast; /* bprc */ u64 rx_discards; /* rdpc */ u64 rx_unknown_protocol; /* rupp */ u64 tx_bytes; /* gotc */ u64 tx_unicast; /* uptc */ u64 tx_multicast; /* mptc */ u64 tx_broadcast; /* bptc */ u64 tx_discards; /* tdpc */ u64 tx_errors; /* tepc */ }; /* Statistics collected per VEB per TC */ struct i40e_veb_tc_stats { u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; u64 
tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; }; /* Statistics collected by the MAC */ struct i40e_hw_port_stats { /* eth stats collected by the port */ struct i40e_eth_stats eth; /* additional port specific stats */ u64 tx_dropped_link_down; /* tdold */ u64 crc_errors; /* crcerrs */ u64 illegal_bytes; /* illerrc */ u64 error_bytes; /* errbc */ u64 mac_local_faults; /* mlfc */ u64 mac_remote_faults; /* mrfc */ u64 rx_length_errors; /* rlec */ u64 link_xon_rx; /* lxonrxc */ u64 link_xoff_rx; /* lxoffrxc */ u64 priority_xon_rx[8]; /* pxonrxc[8] */ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ u64 link_xon_tx; /* lxontxc */ u64 link_xoff_tx; /* lxofftxc */ u64 priority_xon_tx[8]; /* pxontxc[8] */ u64 priority_xoff_tx[8]; /* pxofftxc[8] */ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ u64 rx_size_64; /* prc64 */ u64 rx_size_127; /* prc127 */ u64 rx_size_255; /* prc255 */ u64 rx_size_511; /* prc511 */ u64 rx_size_1023; /* prc1023 */ u64 rx_size_1522; /* prc1522 */ u64 rx_size_big; /* prc9522 */ u64 rx_undersize; /* ruc */ u64 rx_fragments; /* rfc */ u64 rx_oversize; /* roc */ u64 rx_jabber; /* rjc */ u64 tx_size_64; /* ptc64 */ u64 tx_size_127; /* ptc127 */ u64 tx_size_255; /* ptc255 */ u64 tx_size_511; /* ptc511 */ u64 tx_size_1023; /* ptc1023 */ u64 tx_size_1522; /* ptc1522 */ u64 tx_size_big; /* ptc9522 */ u64 mac_short_packet_dropped; /* mspdc */ u64 checksum_error; /* xec */ /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; u32 fd_atr_status; u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; u64 tx_lpi_count; /* etlpic */ u64 rx_lpi_count; /* erlpic */ }; /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03 #define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04 #define I40E_SR_OPTION_ROM_PTR 0x05 #define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06 #define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07 #define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08 #define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09 #define I40E_SR_RO_PCIE_LCB_PTR 0x0A #define I40E_SR_EMP_IMAGE_PTR 0x0B #define I40E_SR_PE_IMAGE_PTR 0x0C #define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D #define I40E_SR_MNG_CONFIG_PTR 0x0E #define I40E_EMP_MODULE_PTR 0x0F #define I40E_SR_EMP_MODULE_PTR 0x48 #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 #define I40E_SR_BOOT_CONFIG_PTR 0x17 #define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28 #define I40E_SR_NVM_MAP_VERSION 0x29 #define I40E_SR_NVM_IMAGE_VERSION 0x2A #define I40E_SR_NVM_STRUCTURE_VERSION 0x2B #define I40E_SR_NVM_EETRACK_LO 0x2D #define I40E_SR_NVM_EETRACK_HI 0x2E #define I40E_SR_VPD_PTR 0x2F #define I40E_SR_PXE_SETUP_PTR 0x30 #define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31 #define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34 #define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35 #define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37 #define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38 #define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A #define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B #define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C #define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E #define I40E_SR_SW_CHECKSUM_WORD 0x3F #define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40 #define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42 #define 
I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44 #define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46 #define I40E_SR_EMP_SR_SETTINGS_PTR 0x48 #define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49 #define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D #define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ #define I40E_SR_VPD_MODULE_MAX_SIZE 1024 #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) #define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) #define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) #define I40E_PTR_TYPE BIT(15) #define I40E_SR_OCP_CFG_WORD0 0x2B #define I40E_SR_OCP_ENABLED BIT(15) /* Shadow RAM related */ #define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 #define I40E_SR_BUF_ALIGNMENT 4096 #define I40E_SR_WORDS_IN_1KB 512 /* Checksum should be calculated such that after adding all the words, * including the checksum word itself, the sum should be 0xBABA. */ #define I40E_SR_SW_CHECKSUM_BASE 0xBABA #define I40E_SRRD_SRCTL_ATTEMPTS 100000 enum i40e_switch_element_types { I40E_SWITCH_ELEMENT_TYPE_MAC = 1, I40E_SWITCH_ELEMENT_TYPE_PF = 2, I40E_SWITCH_ELEMENT_TYPE_VF = 3, I40E_SWITCH_ELEMENT_TYPE_EMP = 4, I40E_SWITCH_ELEMENT_TYPE_BMC = 6, I40E_SWITCH_ELEMENT_TYPE_PE = 16, I40E_SWITCH_ELEMENT_TYPE_VEB = 17, I40E_SWITCH_ELEMENT_TYPE_PA = 18, I40E_SWITCH_ELEMENT_TYPE_VSI = 19, }; /* Supported EtherType filters */ enum i40e_ether_type_index { I40E_ETHER_TYPE_1588 = 0, I40E_ETHER_TYPE_FIP = 1, I40E_ETHER_TYPE_OUI_EXTENDED = 2, I40E_ETHER_TYPE_MAC_CONTROL = 3, I40E_ETHER_TYPE_LLDP = 4, I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, I40E_ETHER_TYPE_QCN_CNM = 7, I40E_ETHER_TYPE_8021X = 8, I40E_ETHER_TYPE_ARP = 9, I40E_ETHER_TYPE_RSV1 = 10, I40E_ETHER_TYPE_RSV2 = 11, }; /* Filter context base size is 1K */ #define I40E_HASH_FILTER_BASE_SIZE 1024 /* Supported Hash filter values */ enum i40e_hash_filter_size { I40E_HASH_FILTER_SIZE_1K = 0, I40E_HASH_FILTER_SIZE_2K = 1, I40E_HASH_FILTER_SIZE_4K = 2, I40E_HASH_FILTER_SIZE_8K = 3, I40E_HASH_FILTER_SIZE_16K = 4, I40E_HASH_FILTER_SIZE_32K = 5, I40E_HASH_FILTER_SIZE_64K = 6, I40E_HASH_FILTER_SIZE_128K = 7, I40E_HASH_FILTER_SIZE_256K = 8, I40E_HASH_FILTER_SIZE_512K = 9, I40E_HASH_FILTER_SIZE_1M = 10, }; /* DMA context base size is 0.5K */ #define I40E_DMA_CNTX_BASE_SIZE 512 /* Supported DMA context values */ enum i40e_dma_cntx_size { I40E_DMA_CNTX_SIZE_512 = 0, I40E_DMA_CNTX_SIZE_1K = 1, I40E_DMA_CNTX_SIZE_2K = 2, I40E_DMA_CNTX_SIZE_4K = 3, I40E_DMA_CNTX_SIZE_8K = 4, I40E_DMA_CNTX_SIZE_16K = 5, I40E_DMA_CNTX_SIZE_32K = 6, I40E_DMA_CNTX_SIZE_64K = 7, I40E_DMA_CNTX_SIZE_128K = 8, I40E_DMA_CNTX_SIZE_256K = 9, }; /* Supported Hash look up table (LUT) sizes */ enum i40e_hash_lut_size { I40E_HASH_LUT_SIZE_128 = 0, I40E_HASH_LUT_SIZE_512 = 1, }; /* Structure to hold per-PF filter control settings */ struct i40e_filter_control_settings { /* number of PE Quad Hash filter buckets */ enum i40e_hash_filter_size pe_filt_num; /* number of PE Quad Hash contexts */ enum i40e_dma_cntx_size pe_cntx_num; /* number of FCoE filter buckets */ enum i40e_hash_filter_size fcoe_filt_num; /* number of FCoE DDP contexts */ enum i40e_dma_cntx_size fcoe_cntx_num; /* size of the Hash LUT */ enum i40e_hash_lut_size hash_lut_size; /* enable FDIR filters for PF and its VFs */ bool enable_fdir; /* enable Ethertype filters for PF and its VFs */ bool enable_ethtype; /* enable MAC/VLAN filters for PF and its VFs */ bool
enable_macvlan; }; /* Structure to hold device level control filter counts */ struct i40e_control_filter_stats { u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ u16 etype_used; /* Used perfect EtherType filters */ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ u16 etype_free; /* Un-used perfect EtherType filters */ }; enum i40e_reset_type { I40E_RESET_POR = 0, I40E_RESET_CORER = 1, I40E_RESET_GLOBR = 2, I40E_RESET_EMPR = 3, }; /* IEEE 802.1AB LLDP Agent Variables from NVM */ #define I40E_NVM_LLDP_CFG_PTR 0x06 #define I40E_SR_LLDP_CFG_PTR 0x31 struct i40e_lldp_variables { u16 length; u16 adminstatus; u16 msgfasttx; u16 msgtxinterval; u16 txparams; u16 timers; u16 crc8; }; /* Offsets into Alternate Ram */ #define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ #define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ #define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ #define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ #define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ #define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ /* Alternate Ram Bandwidth Masks */ #define I40E_ALT_BW_VALUE_MASK 0xFF #define I40E_ALT_BW_RELATIVE_MASK 0x40000000 #define I40E_ALT_BW_VALID_MASK 0x80000000 /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 /* INPUT SET MASK for RSS, flow director, and flexible payload */ #define I40E_L3_SRC_SHIFT 47 #define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) #define I40E_L3_V6_SRC_SHIFT 43 #define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT) #define I40E_L3_DST_SHIFT 35 #define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT) #define I40E_L3_V6_DST_SHIFT 35 #define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT) #define I40E_L4_SRC_SHIFT 34 #define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT) #define I40E_L4_DST_SHIFT 33 #define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT) #define I40E_VERIFY_TAG_SHIFT 31 #define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT) #define I40E_FLEX_50_SHIFT 13 #define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT) #define I40E_FLEX_51_SHIFT 12 #define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT) #define I40E_FLEX_52_SHIFT 11 #define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT) #define I40E_FLEX_53_SHIFT 10 #define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT) #define I40E_FLEX_54_SHIFT 9 #define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT) #define I40E_FLEX_55_SHIFT 8 #define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT) #define I40E_FLEX_56_SHIFT 7 #define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT) #define I40E_FLEX_57_SHIFT 6 #define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) + +#define I40E_BCM_PHY_PCS_STATUS1_PAGE 0x3 +#define I40E_BCM_PHY_PCS_STATUS1_REG 0x0001 +#define I40E_BCM_PHY_PCS_STATUS1_RX_LPI BIT(8) +#define I40E_BCM_PHY_PCS_STATUS1_TX_LPI BIT(9) + #endif /* _I40E_TYPE_H_ */ Index: releng/11.3/sys/dev/ixl/if_ixl.c =================================================================== --- releng/11.3/sys/dev/ixl/if_ixl.c (revision 349180) +++ releng/11.3/sys/dev/ixl/if_ixl.c (revision 349181) @@ -1,810 +1,916 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl.h" #include "ixl_pf.h" #ifdef IXL_IW #include "ixl_iw.h" #include "ixl_iw_int.h" #endif #ifdef PCI_IOV #include "ixl_pf_iov.h" #endif /********************************************************************* * Driver version *********************************************************************/ #define IXL_DRIVER_VERSION_MAJOR 1 -#define IXL_DRIVER_VERSION_MINOR 9 +#define IXL_DRIVER_VERSION_MINOR 11 #define IXL_DRIVER_VERSION_BUILD 9 char ixl_driver_version[] = __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." __XSTRING(IXL_DRIVER_VERSION_MINOR) "." 
__XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixl_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static ixl_vendor_info_t ixl_vendor_info_array[] = { {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, 0, 0, 0}, + {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, 0, 0, 0}, + {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, 0, 0, 0}, + {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, 0, 0, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings *********************************************************************/ static char *ixl_strings[] = { "Intel(R) Ethernet Connection 700 Series PF Driver" }; /********************************************************************* * Function prototypes *********************************************************************/ static int ixl_probe(device_t); static int ixl_attach(device_t); static int ixl_detach(device_t); static int ixl_shutdown(device_t); static int ixl_save_pf_tunables(struct ixl_pf *); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixl_probe), DEVMETHOD(device_attach, ixl_attach), DEVMETHOD(device_detach, ixl_detach), DEVMETHOD(device_shutdown, ixl_shutdown), #ifdef PCI_IOV DEVMETHOD(pci_iov_init, ixl_iov_init), DEVMETHOD(pci_iov_uninit, ixl_iov_uninit), DEVMETHOD(pci_iov_add_vf, ixl_add_vf), #endif {0, 0} }; static driver_t ixl_driver = { "ixl", ixl_methods, sizeof(struct ixl_pf), }; devclass_t ixl_devclass; DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); MODULE_VERSION(ixl, 1); MODULE_DEPEND(ixl, pci, 1, 1, 1); MODULE_DEPEND(ixl, ether, 1, 1, 1); #if defined(DEV_NETMAP) && __FreeBSD_version >= 1100000 MODULE_DEPEND(ixl, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ /* ** TUNEABLE PARAMETERS: */ static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, "IXL driver parameters"); /* * MSIX should be the default for best performance, * but this allows it to be forced off for testing. 
*/ static int ixl_enable_msix = 1; TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0, "Enable MSI-X interrupts"); /* ** Number of descriptors per ring ** - TX and RX sizes are independently configurable */ static int ixl_tx_ring_size = IXL_DEFAULT_RING; TUNABLE_INT("hw.ixl.tx_ring_size", &ixl_tx_ring_size); SYSCTL_INT(_hw_ixl, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN, &ixl_tx_ring_size, 0, "TX Descriptor Ring Size"); static int ixl_rx_ring_size = IXL_DEFAULT_RING; TUNABLE_INT("hw.ixl.rx_ring_size", &ixl_rx_ring_size); SYSCTL_INT(_hw_ixl, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN, &ixl_rx_ring_size, 0, "RX Descriptor Ring Size"); /* ** This can be set manually, if left as 0 the ** number of queues will be calculated based ** on cpus and msix vectors available. */ static int ixl_max_queues = 0; TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues); SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN, &ixl_max_queues, 0, "Number of Queues"); /* * Leave this on unless you need to send flow control * frames (or other control frames) from software */ static int ixl_enable_tx_fc_filter = 1; TUNABLE_INT("hw.ixl.enable_tx_fc_filter", &ixl_enable_tx_fc_filter); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, &ixl_enable_tx_fc_filter, 0, "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources"); +static int ixl_i2c_access_method = 0; +TUNABLE_INT("hw.ixl.i2c_access_method", + &ixl_i2c_access_method); +SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN, + &ixl_i2c_access_method, 0, + IXL_SYSCTL_HELP_I2C_METHOD); + /* * Different method for processing TX descriptor * completion. */ static int ixl_enable_head_writeback = 1; TUNABLE_INT("hw.ixl.enable_head_writeback", &ixl_enable_head_writeback); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, &ixl_enable_head_writeback, 0, "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); static int ixl_core_debug_mask = 0; TUNABLE_INT("hw.ixl.core_debug_mask", &ixl_core_debug_mask); SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, &ixl_core_debug_mask, 0, "Display debug statements that are printed in non-shared code"); static int ixl_shared_debug_mask = 0; TUNABLE_INT("hw.ixl.shared_debug_mask", &ixl_shared_debug_mask); SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, &ixl_shared_debug_mask, 0, "Display debug statements that are printed in shared code"); + /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ static int ixl_dynamic_rx_itr = 1; TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); static int ixl_dynamic_tx_itr = 1; TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); static int ixl_rx_itr = IXL_ITR_8K; TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, &ixl_rx_itr, 0, "RX Interrupt Rate"); static int ixl_tx_itr = IXL_ITR_4K; TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, &ixl_tx_itr, 0, "TX Interrupt Rate"); #ifdef IXL_IW int ixl_enable_iwarp = 0; TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp); SYSCTL_INT(_hw_ixl, 
OID_AUTO, enable_iwarp, CTLFLAG_RDTUN, &ixl_enable_iwarp, 0, "iWARP enabled"); #if __FreeBSD_version < 1100000 int ixl_limit_iwarp_msix = 1; #else int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX; #endif TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix); SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN, &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP"); #endif #ifdef DEV_NETMAP #define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */ #include <dev/netmap/if_ixl_netmap.h> #endif /* DEV_NETMAP */ /********************************************************************* * Device identification routine * * ixl_probe determines if the driver should be loaded on * the hardware based on PCI vendor/device id of the device. * * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int ixl_probe(device_t dev) { ixl_vendor_info_t *ent; u16 pci_vendor_id, pci_device_id; u16 pci_subvendor_id, pci_subdevice_id; char device_name[256]; #if 0 INIT_DEBUGOUT("ixl_probe: begin"); #endif pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != I40E_INTEL_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = ixl_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == 0)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == 0))) { sprintf(device_name, "%s, Version - %s", ixl_strings[ent->index], ixl_driver_version); device_set_desc_copy(dev, device_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /* * Sanity check and save off tunable values.
*/ static int ixl_save_pf_tunables(struct ixl_pf *pf) { device_t dev = pf->dev; /* Save tunable information */ pf->enable_msix = ixl_enable_msix; pf->max_queues = ixl_max_queues; pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter; pf->dynamic_rx_itr = ixl_dynamic_rx_itr; pf->dynamic_tx_itr = ixl_dynamic_tx_itr; pf->dbg_mask = ixl_core_debug_mask; pf->hw.debug_mask = ixl_shared_debug_mask; #ifdef DEV_NETMAP if (ixl_enable_head_writeback == 0) device_printf(dev, "Head writeback mode cannot be disabled " "when netmap is enabled\n"); pf->vsi.enable_head_writeback = 1; #else pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback); #endif - ixl_vsi_setup_rings_size(&pf->vsi, ixl_tx_ring_size, ixl_rx_ring_size); + if (ixl_i2c_access_method > IXL_I2C_ACCESS_METHOD_TYPE_LENGTH - 1 + || ixl_i2c_access_method < IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE) + pf->i2c_access_method = IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE; + else + pf->i2c_access_method = + (enum ixl_i2c_access_method_t)ixl_i2c_access_method; + if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid tx_itr value of %d set!\n", ixl_tx_itr); device_printf(dev, "tx_itr must be between %d and %d, " "inclusive\n", 0, IXL_MAX_ITR); device_printf(dev, "Using default value of %d instead\n", IXL_ITR_4K); pf->tx_itr = IXL_ITR_4K; } else pf->tx_itr = ixl_tx_itr; if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid rx_itr value of %d set!\n", ixl_rx_itr); device_printf(dev, "rx_itr must be between %d and %d, " "inclusive\n", 0, IXL_MAX_ITR); device_printf(dev, "Using default value of %d instead\n", IXL_ITR_8K); pf->rx_itr = IXL_ITR_8K; } else pf->rx_itr = ixl_rx_itr; return (0); } +static int +ixl_attach_recovery_mode(struct ixl_pf *pf) +{ + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = &pf->hw; + device_t dev = pf->dev; + int error = 0; + + device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); + + atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE); + + i40e_get_mac_addr(hw, hw->mac.addr); + + pf->msix = ixl_init_msix(pf); + ixl_setup_stations(pf); + ixl_setup_interface(pf->dev, vsi); + + if (pf->msix > 1) { + error = ixl_setup_adminq_msix(pf); + if (error) { + device_printf(dev, "ixl_setup_adminq_msix() error: %d\n", + error); + goto recovery_err_late; + } + error = ixl_setup_adminq_tq(pf); + if (error) { + device_printf(dev, "ixl_setup_adminq_tq() error: %d\n", + error); + goto recovery_err_late; + } + ixl_configure_intr0_msix(pf); + ixl_enable_intr0(hw); + } else { + error = ixl_setup_legacy(pf); + + error = ixl_setup_adminq_tq(pf); + if (error) { + device_printf(dev, "ixl_setup_adminq_tq() error: %d\n", + error); + goto recovery_err_late; + } + } + + /* Get the bus configuration and set the shared code's config */ + ixl_get_bus_info(pf); + + /* Initialize statistics & add sysctls */ + ixl_add_device_sysctls(pf); + + /* Start the local timer */ + IXL_PF_LOCK(pf); + callout_reset(&pf->timer, hz, ixl_local_timer, pf); + IXL_PF_UNLOCK(pf); + +recovery_err_late: + return (error); +} + /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. 
* * return 0 on success, positive on failure *********************************************************************/ static int ixl_attach(device_t dev) { struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; + enum i40e_get_fw_lldp_status_resp lldp_status; enum i40e_status_code status; int error = 0; INIT_DEBUGOUT("ixl_attach: begin"); /* Allocate, clear, and link in our primary soft structure */ pf = device_get_softc(dev); pf->dev = pf->osdep.dev = dev; hw = &pf->hw; /* ** Note this assumes we have a single embedded VSI, ** this could be enhanced later to allocate multiple */ vsi = &pf->vsi; vsi->dev = pf->dev; vsi->back = pf; /* Save tunable values */ error = ixl_save_pf_tunables(pf); if (error) return (error); /* Core Lock Init*/ IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev)); /* Set up the timer callout */ callout_init_mtx(&pf->timer, &pf->pf_mtx, 0); /* Do PCI setup - map BAR0, etc */ if (ixl_allocate_pci_resources(pf)) { - device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_out; } /* Establish a clean starting point */ i40e_clear_hw(hw); - status = i40e_pf_reset(hw); - if (status) { - device_printf(dev, "PF reset failure %s\n", - i40e_stat_str(hw, status)); - error = EIO; - goto err_out; + + /* Don't try to reset device if it's in recovery mode */ + if (!ixl_fw_recovery_mode(pf)) { + status = i40e_pf_reset(hw); + if (status) { + device_printf(dev, "PF reset failure %s\n", + i40e_stat_str(hw, status)); + error = EIO; + goto err_out; + } } /* Initialize the shared code */ status = i40e_init_shared_code(hw); if (status) { device_printf(dev, "Unable to initialize shared code, error %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } /* Set up the admin queue */ hw->aq.num_arq_entries = IXL_AQ_LEN; hw->aq.num_asq_entries = IXL_AQ_LEN; hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; status = i40e_init_adminq(hw); if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "Unable to initialize Admin Queue, error %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } ixl_print_nvm_version(pf); if (status == I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "The driver for the device stopped " "because the NVM image is newer than expected.\n"); device_printf(dev, "You must install the most recent version of " "the network driver.\n"); error = EIO; goto err_out; } if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) { device_printf(dev, "The driver for the device detected " "a newer version of the NVM image than expected.\n"); device_printf(dev, "Please install the most recent version " "of the network driver.\n"); } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) { device_printf(dev, "The driver for the device detected " "an older version of the NVM image than expected.\n"); device_printf(dev, "Please update the NVM image.\n"); } + if (ixl_fw_recovery_mode(pf)) + return ixl_attach_recovery_mode(pf); + /* Clear PXE mode */ i40e_clear_pxe_mode(hw); /* Get capabilities from the device */ error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "HW capabilities failure!\n"); goto err_get_cap; } /* * Allocate interrupts and figure out number of queues to use * for PF interface */ pf->msix = ixl_init_msix(pf); /* Set up host memory cache */ status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (status) { device_printf(dev, "init_lan_hmc failed: %s\n", i40e_stat_str(hw, status)); 
goto err_get_cap; } status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (status) { device_printf(dev, "configure_lan_hmc failed: %s\n", i40e_stat_str(hw, status)); goto err_mac_hmc; } /* Init queue allocation manager */ error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp); if (error) { device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", error); goto err_mac_hmc; } /* reserve a contiguous allocation for the PF's VSI */ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag); if (error) { device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", error); goto err_mac_hmc; } device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", pf->qtag.num_allocated, pf->qtag.num_active); - /* Disable LLDP from the firmware for certain NVM versions */ - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || - (pf->hw.aq.fw_maj_ver < 4)) { - i40e_aq_stop_lldp(hw, TRUE, NULL); + /* Disable LLDP from the firmware on XL710 for certain NVM versions */ + if (hw->mac.type == I40E_MAC_XL710 && + (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 3)) || + (hw->aq.fw_maj_ver < 4))) { + i40e_aq_stop_lldp(hw, true, false, NULL); pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED; } + /* Try enabling Energy Efficient Ethernet (EEE) mode */ + if(i40e_enable_eee(hw, true) == I40E_SUCCESS) + atomic_set_int(&pf->state, IXL_PF_STATE_EEE_ENABLED); + else + atomic_clear_int(&pf->state, IXL_PF_STATE_EEE_ENABLED); + /* Get MAC addresses from hardware */ i40e_get_mac_addr(hw, hw->mac.addr); error = i40e_validate_mac_addr(hw->mac.addr); if (error) { device_printf(dev, "validate_mac_addr failed: %d\n", error); goto err_mac_hmc; } bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); i40e_get_port_mac_addr(hw, hw->mac.port_addr); /* Query device FW LLDP status */ - ixl_get_fw_lldp_status(pf); + if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) { + if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) { + atomic_set_int(&pf->state, + IXL_PF_STATE_FW_LLDP_DISABLED); + } else { + atomic_clear_int(&pf->state, + IXL_PF_STATE_FW_LLDP_DISABLED); + } + } + /* Tell FW to apply DCB config on link up */ - if ((hw->mac.type != I40E_MAC_X722) - && ((pf->hw.aq.api_maj_ver > 1) - || (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7))) - i40e_aq_set_dcb_parameters(hw, true, NULL); + i40e_aq_set_dcb_parameters(hw, true, NULL); - /* Initialize mac filter list for VSI */ - SLIST_INIT(&vsi->ftl); - /* Set up SW VSI and allocate queue memory and rings */ if (ixl_setup_stations(pf)) { device_printf(dev, "setup stations failed!\n"); error = ENOMEM; goto err_mac_hmc; } /* Setup OS network interface / ifnet */ if (ixl_setup_interface(dev, vsi)) { device_printf(dev, "interface setup failed!\n"); error = EIO; goto err_late; } /* Determine link state */ if (ixl_attach_get_link_status(pf)) { error = EINVAL; goto err_late; } error = ixl_switch_config(pf); if (error) { device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error); goto err_late; } /* Limit PHY interrupts to link, autoneg, and modules failure */ status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (status) { device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s," " aq_err %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); goto err_late; } /* Get the bus configuration and set the shared code's config */ ixl_get_bus_info(pf); /* * In MSI-X mode, initialize the Admin Queue interrupt, * so userland tools can 
communicate with the adapter regardless of * the ifnet interface's status. */ if (pf->msix > 1) { error = ixl_setup_adminq_msix(pf); if (error) { device_printf(dev, "ixl_setup_adminq_msix() error: %d\n", error); goto err_late; } error = ixl_setup_adminq_tq(pf); if (error) { device_printf(dev, "ixl_setup_adminq_tq() error: %d\n", error); goto err_late; } ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); error = ixl_setup_queue_msix(vsi); if (error) device_printf(dev, "ixl_setup_queue_msix() error: %d\n", error); error = ixl_setup_queue_tqs(vsi); if (error) device_printf(dev, "ixl_setup_queue_tqs() error: %d\n", error); } else { error = ixl_setup_legacy(pf); error = ixl_setup_adminq_tq(pf); if (error) { device_printf(dev, "ixl_setup_adminq_tq() error: %d\n", error); goto err_late; } error = ixl_setup_queue_tqs(vsi); if (error) device_printf(dev, "ixl_setup_queue_tqs() error: %d\n", error); } if (error) { device_printf(dev, "interrupt setup error: %d\n", error); } /* Set initial advertised speed sysctl value */ ixl_set_initial_advertised_speeds(pf); /* Initialize statistics & add sysctls */ ixl_add_device_sysctls(pf); ixl_pf_reset_stats(pf); ixl_update_stats_counters(pf); ixl_add_hw_stats(pf); + /* Add protocol filters to list */ + ixl_init_filters(vsi); + /* Register for VLAN events */ vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); #ifdef PCI_IOV ixl_initialize_sriov(pf); #endif #ifdef DEV_NETMAP ixl_netmap_attach(vsi); #endif /* DEV_NETMAP */ #ifdef IXL_IW if (hw->func_caps.iwarp && ixl_enable_iwarp) { pf->iw_enabled = (pf->iw_msix > 0) ? true : false; if (pf->iw_enabled) { error = ixl_iw_pf_attach(pf); if (error) { device_printf(dev, "interfacing to iwarp driver failed: %d\n", error); goto err_late; } else device_printf(dev, "iWARP ready\n"); } else device_printf(dev, "iwarp disabled on this device (no msix vectors)\n"); } else { pf->iw_enabled = false; device_printf(dev, "The device is not iWARP enabled\n"); } #endif + /* Start the local timer */ + IXL_PF_LOCK(pf); + callout_reset(&pf->timer, hz, ixl_local_timer, pf); + IXL_PF_UNLOCK(pf); INIT_DEBUGOUT("ixl_attach: end"); return (0); err_late: if (vsi->ifp != NULL) { ether_ifdetach(vsi->ifp); if_free(vsi->ifp); } err_mac_hmc: i40e_shutdown_lan_hmc(hw); err_get_cap: i40e_shutdown_adminq(hw); err_out: ixl_free_pci_resources(pf); ixl_free_vsi(vsi); IXL_PF_LOCK_DESTROY(pf); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
* * return 0 on success, positive on failure *********************************************************************/ static int ixl_detach(device_t dev) { struct ixl_pf *pf = device_get_softc(dev); struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; enum i40e_status_code status; #if defined(PCI_IOV) || defined(IXL_IW) int error; #endif INIT_DEBUGOUT("ixl_detach: begin"); /* Make sure VLANS are not using driver */ if (vsi->ifp->if_vlantrunk != NULL) { device_printf(dev, "Vlan in use, detach first\n"); return (EBUSY); } #ifdef PCI_IOV error = pci_iov_detach(dev); if (error != 0) { device_printf(dev, "SR-IOV in use; detach first.\n"); return (error); } #endif /* Remove all previously allocated media types */ ifmedia_removeall(&vsi->media); ether_ifdetach(vsi->ifp); if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) ixl_stop(pf); /* Shutdown LAN HMC */ - status = i40e_shutdown_lan_hmc(hw); - if (status) - device_printf(dev, - "Shutdown LAN HMC failed with code %d\n", status); + if (hw->hmc.hmc_obj) { + status = i40e_shutdown_lan_hmc(hw); + if (status) + device_printf(dev, + "Shutdown LAN HMC failed with code %s\n", i40e_stat_str(hw, status)); + } /* Teardown LAN queue resources */ ixl_teardown_queue_msix(vsi); ixl_free_queue_tqs(vsi); + + /* Timer enqueues admin task. Stop it before freeing the admin taskqueue */ + callout_drain(&pf->timer); + /* Shutdown admin queue */ ixl_disable_intr0(hw); ixl_teardown_adminq_msix(pf); - ixl_free_adminq_tq(pf); + status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, "Shutdown Admin queue failed with code %d\n", status); + ixl_free_adminq_tq(pf); + /* Unregister VLAN events */ if (vsi->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); if (vsi->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); - - callout_drain(&pf->timer); #ifdef IXL_IW if (ixl_enable_iwarp && pf->iw_enabled) { error = ixl_iw_pf_detach(pf); if (error == EBUSY) { device_printf(dev, "iwarp in use; stop it first.\n"); return (error); } } #endif #ifdef DEV_NETMAP netmap_detach(vsi->ifp); #endif /* DEV_NETMAP */ ixl_pf_qmgr_destroy(&pf->qmgr); ixl_free_pci_resources(pf); bus_generic_detach(dev); if_free(vsi->ifp); ixl_free_vsi(vsi); IXL_PF_LOCK_DESTROY(pf); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int ixl_shutdown(device_t dev) { struct ixl_pf *pf = device_get_softc(dev); ixl_stop(pf); return (0); } Index: releng/11.3/sys/dev/ixl/if_ixlv.c =================================================================== --- releng/11.3/sys/dev/ixl/if_ixlv.c (revision 349180) +++ releng/11.3/sys/dev/ixl/if_ixlv.c (revision 349181) @@ -1,3245 +1,3176 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ +#include "sys/limits.h" + #include "ixl.h" #include "ixlv.h" /********************************************************************* * Driver version *********************************************************************/ #define IXLV_DRIVER_VERSION_MAJOR 1 #define IXLV_DRIVER_VERSION_MINOR 5 -#define IXLV_DRIVER_VERSION_BUILD 4 +#define IXLV_DRIVER_VERSION_BUILD 8 char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "." __XSTRING(IXLV_DRIVER_VERSION_MINOR) "." __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixlv_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static ixl_vendor_info_t ixlv_vendor_info_array[] = { {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings *********************************************************************/ static char *ixlv_strings[] = { "Intel(R) Ethernet Connection 700 Series VF Driver" }; /********************************************************************* * Function prototypes *********************************************************************/ static int ixlv_probe(device_t); static int ixlv_attach(device_t); static int ixlv_detach(device_t); static int ixlv_shutdown(device_t); static void ixlv_init_locked(struct ixlv_sc *); static int ixlv_allocate_pci_resources(struct ixlv_sc *); static void ixlv_free_pci_resources(struct ixlv_sc *); static int ixlv_assign_msix(struct ixlv_sc *); static int ixlv_init_msix(struct ixlv_sc *); static int ixlv_init_taskqueue(struct ixlv_sc *); static int ixlv_setup_queues(struct ixlv_sc *); static void ixlv_config_rss(struct ixlv_sc *); static void ixlv_stop(struct ixlv_sc *); static void ixlv_add_multi(struct ixl_vsi *); static void ixlv_del_multi(struct ixl_vsi *); static void ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que); static void ixlv_free_queues(struct ixl_vsi *); static int ixlv_setup_interface(device_t, struct ixlv_sc *); static int ixlv_teardown_adminq_msix(struct ixlv_sc *); static int ixlv_media_change(struct ifnet *); static 
void ixlv_media_status(struct ifnet *, struct ifmediareq *); static void ixlv_local_timer(void *); static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16); static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr); static void ixlv_init_filters(struct ixlv_sc *); static void ixlv_free_filters(struct ixlv_sc *); static void ixlv_msix_que(void *); static void ixlv_msix_adminq(void *); static void ixlv_do_adminq(void *, int); static void ixlv_do_adminq_locked(struct ixlv_sc *sc); static void ixlv_handle_que(void *, int); static int ixlv_reset(struct ixlv_sc *); static int ixlv_reset_complete(struct i40e_hw *); static void ixlv_set_queue_rx_itr(struct ixl_queue *); static void ixlv_set_queue_tx_itr(struct ixl_queue *); static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *, enum i40e_status_code); static void ixlv_configure_itr(struct ixlv_sc *); static void ixlv_enable_adminq_irq(struct i40e_hw *); static void ixlv_disable_adminq_irq(struct i40e_hw *); static void ixlv_enable_queue_irq(struct i40e_hw *, int); static void ixlv_disable_queue_irq(struct i40e_hw *, int); static void ixlv_setup_vlan_filters(struct ixlv_sc *); static void ixlv_register_vlan(void *, struct ifnet *, u16); static void ixlv_unregister_vlan(void *, struct ifnet *, u16); static void ixlv_init_hw(struct ixlv_sc *); static int ixlv_setup_vc(struct ixlv_sc *); static int ixlv_vf_config(struct ixlv_sc *); static void ixlv_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed); static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS); static void ixlv_add_sysctls(struct ixlv_sc *); #ifdef IXL_DEBUG static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); #endif /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixlv_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixlv_probe), DEVMETHOD(device_attach, ixlv_attach), DEVMETHOD(device_detach, ixlv_detach), DEVMETHOD(device_shutdown, ixlv_shutdown), {0, 0} }; static driver_t ixlv_driver = { "ixlv", ixlv_methods, sizeof(struct ixlv_sc), }; devclass_t ixlv_devclass; DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0); MODULE_DEPEND(ixlv, pci, 1, 1, 1); MODULE_DEPEND(ixlv, ether, 1, 1, 1); /* ** TUNEABLE PARAMETERS: */ static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0, "IXLV driver parameters"); /* ** Number of descriptors per ring: ** - TX and RX sizes are independently configurable */ static int ixlv_tx_ring_size = IXL_DEFAULT_RING; TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size); SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN, &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size"); static int ixlv_rx_ring_size = IXL_DEFAULT_RING; TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size); SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN, &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size"); /* Set to zero to auto calculate */ int ixlv_max_queues = 0; TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues); SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN, &ixlv_max_queues, 0, "Number of Queues"); /* ** Number of entries in Tx queue buf_ring. ** Increasing this will reduce the number of ** errors when transmitting fragmented UDP ** packets. 
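All of the knobs above are declared CTLFLAG_RDTUN, so they are read once from the loader environment at boot and are read-only sysctls afterwards. A hedged usage example for /boot/loader.conf (the values are illustrative only; buf_ring(9) sizes such as txbrsz should be powers of two):

    hw.ixlv.tx_ring_size="2048"
    hw.ixlv.rx_ring_size="2048"
    hw.ixlv.max_queues="4"
    hw.ixlv.txbrsz="8192"

After boot, `sysctl hw.ixlv.tx_ring_size` reports the value in effect.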
*/ static int ixlv_txbrsz = DEFAULT_TXBRSZ; TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz); SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN, &ixlv_txbrsz, 0, "TX Buf Ring Size"); /* * Different method for processing TX descriptor * completion. */ static int ixlv_enable_head_writeback = 0; TUNABLE_INT("hw.ixlv.enable_head_writeback", &ixlv_enable_head_writeback); SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, &ixlv_enable_head_writeback, 0, "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ int ixlv_dynamic_rx_itr = 0; TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); int ixlv_dynamic_tx_itr = 0; TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); int ixlv_rx_itr = IXL_ITR_8K; TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN, &ixlv_rx_itr, 0, "RX Interrupt Rate"); int ixlv_tx_itr = IXL_ITR_4K; TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN, &ixlv_tx_itr, 0, "TX Interrupt Rate"); /********************************************************************* * Device identification routine * * ixlv_probe determines if the driver should be loaded on * the hardware based on PCI vendor/device id of the device. * * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int ixlv_probe(device_t dev) { ixl_vendor_info_t *ent; u16 pci_vendor_id, pci_device_id; u16 pci_subvendor_id, pci_subdevice_id; char device_name[256]; #if 0 INIT_DEBUGOUT("ixlv_probe: begin"); #endif pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != I40E_INTEL_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = ixlv_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == 0)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == 0))) { sprintf(device_name, "%s, Version - %s", ixlv_strings[ent->index], ixlv_driver_version); device_set_desc_copy(dev, device_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. 
* * return 0 on success, positive on failure *********************************************************************/ static int ixlv_attach(device_t dev) { struct ixlv_sc *sc; struct i40e_hw *hw; struct ixl_vsi *vsi; int error = 0; INIT_DBG_DEV(dev, "begin"); /* Allocate, clear, and link in our primary soft structure */ sc = device_get_softc(dev); sc->dev = sc->osdep.dev = dev; hw = &sc->hw; vsi = &sc->vsi; vsi->dev = dev; /* Initialize hw struct */ ixlv_init_hw(sc); /* Allocate filter lists */ ixlv_init_filters(sc); /* Save this tunable */ vsi->enable_head_writeback = ixlv_enable_head_writeback; /* Core Lock Init */ mtx_init(&sc->mtx, device_get_nameunit(dev), "IXL SC Lock", MTX_DEF); /* Set up the timer callout */ callout_init_mtx(&sc->timer, &sc->mtx, 0); /* Do PCI setup - map BAR0, etc */ if (ixlv_allocate_pci_resources(sc)) { device_printf(dev, "%s: Allocation of PCI resources failed\n", __func__); error = ENXIO; goto err_early; } INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors"); error = i40e_set_mac_type(hw); if (error) { device_printf(dev, "%s: set_mac_type failed: %d\n", __func__, error); goto err_pci_res; } error = ixlv_reset_complete(hw); if (error) { device_printf(dev, "%s: Device is still being reset\n", __func__); goto err_pci_res; } INIT_DBG_DEV(dev, "VF Device is ready for configuration"); error = ixlv_setup_vc(sc); if (error) { device_printf(dev, "%s: Error setting up PF comms, %d\n", __func__, error); goto err_pci_res; } INIT_DBG_DEV(dev, "PF API version verified"); /* Need API version before sending reset message */ error = ixlv_reset(sc); if (error) { device_printf(dev, "VF reset failed; reload the driver\n"); goto err_aq; } INIT_DBG_DEV(dev, "VF reset complete"); /* Ask for VF config from PF */ error = ixlv_vf_config(sc); if (error) { device_printf(dev, "Error getting configuration from PF: %d\n", error); goto err_aq; } device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n", sc->vf_res->num_vsis, sc->vf_res->num_queue_pairs, sc->vf_res->max_vectors, sc->vf_res->rss_key_size, sc->vf_res->rss_lut_size); #ifdef IXL_DEBUG device_printf(dev, "Offload flags: 0x%b\n", sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS); #endif /* got VF config message back from PF, now we can parse it */ for (int i = 0; i < sc->vf_res->num_vsis; i++) { if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) sc->vsi_res = &sc->vf_res->vsi_res[i]; } if (!sc->vsi_res) { device_printf(dev, "%s: no LAN VSI found\n", __func__); error = EIO; goto err_res_buf; } INIT_DBG_DEV(dev, "Resource Acquisition complete"); /* If no mac address was assigned just make a random one */ if (!ixlv_check_ether_addr(hw->mac.addr)) { u8 addr[ETHER_ADDR_LEN]; arc4rand(&addr, sizeof(addr), 0); addr[0] &= 0xFE; addr[0] |= 0x02; bcopy(addr, hw->mac.addr, sizeof(addr)); } /* Now that the number of queues for this VF is known, set up interrupts */ sc->msix = ixlv_init_msix(sc); /* We fail without MSIX support */ if (sc->msix == 0) { error = ENXIO; goto err_res_buf; } vsi->id = sc->vsi_res->vsi_id; vsi->back = (void *)sc; vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX; ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size); /* This allocates the memory and early settings */ if (ixlv_setup_queues(sc) != 0) { device_printf(dev, "%s: setup queues failed!\n", __func__); error = EIO; goto out; } /* Do queue interrupt setup */ if (ixlv_assign_msix(sc) != 0) { device_printf(dev, "%s: allocating queue interrupts failed!\n", __func__); error = ENXIO; goto out; } INIT_DBG_DEV(dev, 
"Queue memory and interrupts setup"); /* Setup the stack interface */ if (ixlv_setup_interface(dev, sc) != 0) { device_printf(dev, "%s: setup interface failed!\n", __func__); error = EIO; goto out; } INIT_DBG_DEV(dev, "Interface setup complete"); /* Start AdminQ taskqueue */ ixlv_init_taskqueue(sc); /* We expect a link state message, so schedule the AdminQ task now */ taskqueue_enqueue(sc->tq, &sc->aq_irq); /* Initialize stats */ bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); ixlv_add_sysctls(sc); /* Register for VLAN events */ vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); /* We want AQ enabled early */ ixlv_enable_adminq_irq(hw); /* Set things up to run init */ sc->init_state = IXLV_INIT_READY; ixl_vc_init_mgr(sc, &sc->vc_mgr); INIT_DBG_DEV(dev, "end"); return (error); out: ixlv_free_queues(vsi); ixlv_teardown_adminq_msix(sc); err_res_buf: free(sc->vf_res, M_DEVBUF); err_aq: i40e_shutdown_adminq(hw); err_pci_res: ixlv_free_pci_resources(sc); err_early: mtx_destroy(&sc->mtx); ixlv_free_filters(sc); INIT_DBG_DEV(dev, "end: error %d", error); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure *********************************************************************/ static int ixlv_detach(device_t dev) { struct ixlv_sc *sc = device_get_softc(dev); struct ixl_vsi *vsi = &sc->vsi; struct i40e_hw *hw = &sc->hw; enum i40e_status_code status; INIT_DBG_DEV(dev, "begin"); /* Make sure VLANS are not using driver */ if (vsi->ifp->if_vlantrunk != NULL) { if_printf(vsi->ifp, "Vlan in use, detach first\n"); return (EBUSY); } /* Remove all the media and link information */ ifmedia_removeall(&sc->media); /* Stop driver */ ether_ifdetach(vsi->ifp); if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) { mtx_lock(&sc->mtx); ixlv_stop(sc); mtx_unlock(&sc->mtx); } /* Unregister VLAN events */ if (vsi->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); if (vsi->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); /* Drain VC mgr */ callout_drain(&sc->vc_mgr.callout); ixlv_disable_adminq_irq(hw); ixlv_teardown_adminq_msix(sc); /* Drain admin queue taskqueue */ taskqueue_free(sc->tq); status = i40e_shutdown_adminq(&sc->hw); if (status != I40E_SUCCESS) { device_printf(dev, "i40e_shutdown_adminq() failed with status %s\n", i40e_stat_str(hw, status)); } if_free(vsi->ifp); free(sc->vf_res, M_DEVBUF); ixlv_free_queues(vsi); ixlv_free_pci_resources(sc); ixlv_free_filters(sc); bus_generic_detach(dev); mtx_destroy(&sc->mtx); INIT_DBG_DEV(dev, "end"); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int ixlv_shutdown(device_t dev) { struct ixlv_sc *sc = device_get_softc(dev); INIT_DBG_DEV(dev, "begin"); mtx_lock(&sc->mtx); ixlv_stop(sc); mtx_unlock(&sc->mtx); INIT_DBG_DEV(dev, "end"); return (0); } /* * Configure TXCSUM(IPV6) and TSO(4/6) * - the hardware handles these together so we * need to tweak them */ static void ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int 
mask) { /* Enable/disable TXCSUM/TSO4 */ if (!(ifp->if_capenable & IFCAP_TXCSUM) && !(ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { ifp->if_capenable |= IFCAP_TXCSUM; /* enable TXCSUM, restore TSO if previously enabled */ if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; ifp->if_capenable |= IFCAP_TSO4; } } else if (mask & IFCAP_TSO4) { ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; if_printf(ifp, "TSO4 requires txcsum, enabling both...\n"); } } else if((ifp->if_capenable & IFCAP_TXCSUM) && !(ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) ifp->if_capenable &= ~IFCAP_TXCSUM; else if (mask & IFCAP_TSO4) ifp->if_capenable |= IFCAP_TSO4; } else if((ifp->if_capenable & IFCAP_TXCSUM) && (ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { vsi->flags |= IXL_FLAGS_KEEP_TSO4; ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); if_printf(ifp, "TSO4 requires txcsum, disabling both...\n"); } else if (mask & IFCAP_TSO4) ifp->if_capenable &= ~IFCAP_TSO4; } /* Enable/disable TXCSUM_IPV6/TSO6 */ if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; ifp->if_capenable |= IFCAP_TSO6; } } else if (mask & IFCAP_TSO6) { ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; if_printf(ifp, "TSO6 requires txcsum6, enabling both...\n"); } } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; else if (mask & IFCAP_TSO6) ifp->if_capenable |= IFCAP_TSO6; } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && (ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { vsi->flags |= IXL_FLAGS_KEEP_TSO6; ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); if_printf(ifp, "TSO6 requires txcsum6, disabling both...\n"); } else if (mask & IFCAP_TSO6) ifp->if_capenable &= ~IFCAP_TSO6; } } /********************************************************************* * Ioctl entry point * * ixlv_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ static int ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ixl_vsi *vsi = ifp->if_softc; struct ixlv_sc *sc = vsi->back; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; bool avoid_reset = FALSE; #endif int error = 0; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif #if defined(INET) || defined(INET6) /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. 
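ixlv_cap_txcsum_tso() above encodes one invariant: TSO cannot be enabled without the matching TX checksum offload, so the two capabilities must be toggled as a pair, with the IXL_FLAGS_KEEP_TSO* flags remembering a TSO that was forced off so it can be restored later. A minimal restatement of that invariant as a hypothetical predicate (not driver code):

static inline bool
ixlv_tso_caps_valid(int capenable)
{
	/* TSO4 requires TXCSUM; TSO6 requires TXCSUM_IPV6 */
	if ((capenable & IFCAP_TSO4) && !(capenable & IFCAP_TXCSUM))
		return (false);
	if ((capenable & IFCAP_TSO6) && !(capenable & IFCAP_TXCSUM_IPV6))
		return (false);
	return (true);
}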
*/ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ixlv_init(vsi); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; #endif case SIOCSIFMTU: IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)"); mtx_lock(&sc->mtx); if (ifr->ifr_mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { error = EINVAL; IOCTL_DBG_IF(ifp, "mtu too large"); } else { IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu); // ERJ: Interestingly enough, these types don't match ifp->if_mtu = (u_long)ifr->ifr_mtu; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_init_locked(sc); } mtx_unlock(&sc->mtx); break; case SIOCSIFFLAGS: IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)"); mtx_lock(&sc->mtx); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ixlv_init_locked(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_stop(sc); sc->if_flags = ifp->if_flags; mtx_unlock(&sc->mtx); break; case SIOCADDMULTI: IOCTL_DBG_IF2(ifp, "SIOCADDMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mtx_lock(&sc->mtx); ixlv_disable_intr(vsi); ixlv_add_multi(vsi); ixlv_enable_intr(vsi); mtx_unlock(&sc->mtx); } break; case SIOCDELMULTI: IOCTL_DBG_IF2(ifp, "SIOCDELMULTI"); if (sc->init_state == IXLV_RUNNING) { mtx_lock(&sc->mtx); ixlv_disable_intr(vsi); ixlv_del_multi(vsi); ixlv_enable_intr(vsi); mtx_unlock(&sc->mtx); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)"); ixlv_cap_txcsum_tso(vsi, ifp, mask); if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixlv_init(vsi); } VLAN_CAPABILITIES(ifp); break; } default: IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command); error = ether_ioctl(ifp, command, data); break; } return (error); } /* ** To do a reinit on the VF is unfortunately more complicated ** than a physical device, we must have the PF more or less ** completely recreate our memory, so many things that were ** done only once at attach in traditional drivers now must be ** redone at each reinitialization. This function does that ** 'prelude' so we can then call the normal locked init code. */ int ixlv_reinit_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; struct ifnet *ifp = vsi->ifp; struct ixlv_mac_filter *mf, *mf_temp; struct ixlv_vlan_filter *vf; int error = 0; INIT_DBG_IF(ifp, "begin"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_stop(sc); error = ixlv_reset(sc); INIT_DBG_IF(ifp, "VF was reset"); /* set the state in case we went thru RESET */ sc->init_state = IXLV_RUNNING; /* ** Resetting the VF drops all filters from hardware; ** we need to mark them to be re-added in init. 
*/ SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) { if (mf->flags & IXL_FILTER_DEL) { SLIST_REMOVE(sc->mac_filters, mf, ixlv_mac_filter, next); free(mf, M_DEVBUF); } else mf->flags |= IXL_FILTER_ADD; } if (vsi->num_vlans != 0) SLIST_FOREACH(vf, sc->vlan_filters, next) vf->flags = IXL_FILTER_ADD; else { /* clean any stale filters */ while (!SLIST_EMPTY(sc->vlan_filters)) { vf = SLIST_FIRST(sc->vlan_filters); SLIST_REMOVE_HEAD(sc->vlan_filters, next); free(vf, M_DEVBUF); } } ixlv_enable_adminq_irq(hw); ixl_vc_flush(&sc->vc_mgr); INIT_DBG_IF(ifp, "end"); return (error); } static void ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg, enum i40e_status_code code) { struct ixlv_sc *sc; sc = arg; /* * Ignore "Adapter Stopped" message as that happens if an ifconfig down * happens while a command is in progress, so we don't print an error * in that case. */ if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) { if_printf(sc->vsi.ifp, "Error %s waiting for PF to complete operation %d\n", i40e_stat_str(&sc->hw, code), cmd->request); } } static void ixlv_init_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; struct ixl_queue *que = vsi->queues; struct ifnet *ifp = vsi->ifp; int error = 0; INIT_DBG_IF(ifp, "begin"); IXLV_CORE_LOCK_ASSERT(sc); /* Do a reinit first if an init has already been done */ if ((sc->init_state == IXLV_RUNNING) || (sc->init_state == IXLV_RESET_REQUIRED) || (sc->init_state == IXLV_RESET_PENDING)) error = ixlv_reinit_locked(sc); /* Don't bother with init if we failed reinit */ if (error) goto init_done; /* Remove existing MAC filter if new MAC addr is set */ if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) { error = ixlv_del_mac_filter(sc, hw->mac.addr); if (error == 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); } /* Check for an LAA mac address... 
*/ bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN); ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP); if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= CSUM_OFFLOAD_IPV6; /* Add mac filter for this VF to PF */ if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) { error = ixlv_add_mac_filter(sc, hw->mac.addr, 0); if (!error || error == EEXIST) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd, IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, sc); } /* Setup vlans if needed */ ixlv_setup_vlan_filters(sc); /* Prepare the queues for operation */ for (int i = 0; i < vsi->num_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; ixl_init_tx_ring(que); if (vsi->max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; ixl_init_rx_ring(que); } /* Set initial ITR values */ ixlv_configure_itr(sc); /* Configure queues */ ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd, IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc); /* Set up RSS */ ixlv_config_rss(sc); /* Map vectors */ ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc); /* Enable queues */ ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd, IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc); /* Start the local timer */ callout_reset(&sc->timer, hz, ixlv_local_timer, sc); sc->init_state = IXLV_RUNNING; init_done: INIT_DBG_IF(ifp, "end"); return; } /* ** Init entry point for the stack */ void ixlv_init(void *arg) { struct ixl_vsi *vsi = (struct ixl_vsi *)arg; struct ixlv_sc *sc = vsi->back; int retries = 0; /* Prevent init from running again while waiting for AQ calls * made in init_locked() to complete. */ mtx_lock(&sc->mtx); if (sc->init_in_progress) { mtx_unlock(&sc->mtx); return; } else sc->init_in_progress = true; ixlv_init_locked(sc); mtx_unlock(&sc->mtx); /* Wait for init_locked to finish */ while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) && ++retries < IXLV_MAX_INIT_WAIT) { i40e_msec_pause(25); } if (retries >= IXLV_MAX_INIT_WAIT) { if_printf(vsi->ifp, "Init failed to complete in allotted time!\n"); } mtx_lock(&sc->mtx); sc->init_in_progress = false; mtx_unlock(&sc->mtx); } /* * ixlv_attach() helper function; gathers information about * the (virtual) hardware for use elsewhere in the driver. */ static void ixlv_init_hw(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; /* Save off the information about this board */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); } /* * ixlv_attach() helper function; initializes the admin queue * and attempts to establish contact with the PF by * retrying the initial "API version" message several times * or until the PF responds. 
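The header above describes the retry strategy implemented below; the core idiom, sketched in isolation (IXLV_AQ_MAX_ERR bounds both the outer init attempts and this inner poll):

	/* Hedged sketch of the bounded poll used after each AQ send */
	int tries = 0;
	while (!i40e_asq_done(hw)) {
		if (++tries > IXLV_AQ_MAX_ERR)
			return (ETIMEDOUT);	/* caller may re-init and retry */
		i40e_msec_pause(10);		/* 10 ms between polls */
	}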
*/ static int ixlv_setup_vc(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int error = 0, ret_error = 0, asq_retries = 0; bool send_api_ver_retried = 0; /* Need to set these AQ parameters before initializing AQ */ hw->aq.num_arq_entries = IXL_AQ_LEN; hw->aq.num_asq_entries = IXL_AQ_LEN; hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) { /* Initialize admin queue */ error = i40e_init_adminq(hw); if (error) { device_printf(dev, "%s: init_adminq failed: %d\n", __func__, error); ret_error = 1; continue; } INIT_DBG_DEV(dev, "Initialized Admin Queue; starting" " send_api_ver attempt %d", i+1); retry_send: /* Send VF's API version */ error = ixlv_send_api_ver(sc); if (error) { i40e_shutdown_adminq(hw); ret_error = 2; device_printf(dev, "%s: unable to send api" " version to PF on attempt %d, error %d\n", __func__, i+1, error); } asq_retries = 0; while (!i40e_asq_done(hw)) { if (++asq_retries > IXLV_AQ_MAX_ERR) { i40e_shutdown_adminq(hw); device_printf(dev, "Admin Queue timeout " "(waiting for send_api_ver), %d more tries...\n", IXLV_AQ_MAX_ERR - (i + 1)); ret_error = 3; break; } i40e_msec_pause(10); } if (asq_retries > IXLV_AQ_MAX_ERR) continue; INIT_DBG_DEV(dev, "Sent API version message to PF"); /* Verify that the VF accepts the PF's API version */ error = ixlv_verify_api_ver(sc); if (error == ETIMEDOUT) { if (!send_api_ver_retried) { /* Resend message, one more time */ send_api_ver_retried = true; device_printf(dev, "%s: Timeout while verifying API version on first" " try!\n", __func__); goto retry_send; } else { device_printf(dev, "%s: Timeout while verifying API version on second" " try!\n", __func__); ret_error = 4; break; } } if (error) { device_printf(dev, "%s: Unable to verify API version," " error %s\n", __func__, i40e_stat_str(hw, error)); ret_error = 5; } break; } if (ret_error >= 4) i40e_shutdown_adminq(hw); return (ret_error); } /* * ixlv_attach() helper function; asks the PF for this VF's * configuration, and saves the information if it receives it. 
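A distilled sketch of the resend-once flow the function below implements; send_cfg() and poll_reply() are hypothetical stand-ins for ixlv_send_vf_config_msg() and ixlv_get_vf_config():

	int retried = 0;
retry:
	send_cfg(sc);				/* ask the PF for VF resources */
	if (poll_reply(sc) == ETIMEDOUT && !retried) {
		retried = 1;			/* one timeout earns one resend */
		goto retry;
	}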
*/ static int ixlv_vf_config(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int bufsz, error = 0, ret_error = 0; int asq_retries, retried = 0; retry_config: error = ixlv_send_vf_config_msg(sc); if (error) { device_printf(dev, "%s: Unable to send VF config request, attempt %d," " error %d\n", __func__, retried + 1, error); ret_error = 2; } asq_retries = 0; while (!i40e_asq_done(hw)) { if (++asq_retries > IXLV_AQ_MAX_ERR) { device_printf(dev, "%s: Admin Queue timeout " "(waiting for send_vf_config_msg), attempt %d\n", __func__, retried + 1); ret_error = 3; goto fail; } i40e_msec_pause(10); } INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d", retried + 1); if (!sc->vf_res) { bufsz = sizeof(struct virtchnl_vf_resource) + (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT); if (!sc->vf_res) { device_printf(dev, "%s: Unable to allocate memory for VF configuration" " message from PF on attempt %d\n", __func__, retried + 1); ret_error = 1; goto fail; } } /* Check for VF config response */ error = ixlv_get_vf_config(sc); if (error == ETIMEDOUT) { /* The 1st time we timeout, send the configuration message again */ if (!retried) { retried++; goto retry_config; } device_printf(dev, "%s: ixlv_get_vf_config() timed out waiting for a response\n", __func__); } if (error) { device_printf(dev, "%s: Unable to get VF configuration from PF after %d tries!\n", __func__, retried + 1); ret_error = 4; } goto done; fail: free(sc->vf_res, M_DEVBUF); done: return (ret_error); } /* * Allocate MSI/X vectors, setup the AQ vector early */ static int ixlv_init_msix(struct ixlv_sc *sc) { device_t dev = sc->dev; int rid, want, vectors, queues, available; int auto_max_queues; rid = PCIR_BAR(IXL_MSIX_BAR); sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->msix_mem) { /* May not be enabled */ device_printf(sc->dev, "Unable to map MSIX table\n"); goto fail; } available = pci_msix_count(dev); if (available == 0) { /* system has msix disabled */ bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->msix_mem); sc->msix_mem = NULL; goto fail; } /* Clamp queues to number of CPUs and # of MSI-X vectors available */ auto_max_queues = min(mp_ncpus, available - 1); /* Clamp queues to # assigned to VF by PF */ auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs); /* Override with tunable value if tunable is less than autoconfig count */ if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues)) queues = ixlv_max_queues; /* Use autoconfig amount if that's lower */ else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) { device_printf(dev, "ixlv_max_queues (%d) is too large, using " "autoconfig amount (%d)...\n", ixlv_max_queues, auto_max_queues); queues = auto_max_queues; } /* Limit maximum auto-configured queues to 8 if no user value is set */ else queues = min(auto_max_queues, 8); #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif /* ** Want one vector (RX/TX pair) per queue ** plus an additional for the admin queue. */ want = queues + 1; if (want <= available) /* Have enough */ vectors = want; else { device_printf(sc->dev, "MSIX Configuration Problem, " "%d vectors available but %d wanted!\n", available, want); goto fail; } #ifdef RSS /* * If we're doing RSS, the number of queues needs to * match the number of RSS buckets that are configured. 
* * + If there's more queues than RSS buckets, we'll end * up with queues that get no traffic. * * + If there's more RSS buckets than queues, we'll end * up having multiple RSS buckets map to the same queue, * so there'll be some contention. */ if (queues != rss_getnumbuckets()) { device_printf(dev, "%s: queues (%d) != RSS buckets (%d)" "; performance will be impacted.\n", __func__, queues, rss_getnumbuckets()); } #endif if (pci_alloc_msix(dev, &vectors) == 0) { device_printf(sc->dev, "Using MSIX interrupts with %d vectors\n", vectors); sc->msix = vectors; sc->vsi.num_queues = queues; } /* Next we need to setup the vector for the Admin Queue */ rid = 1; /* zero vector + 1 */ sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "Unable to allocate" " bus resource: AQ interrupt \n"); goto fail; } if (bus_setup_intr(dev, sc->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixlv_msix_adminq, sc, &sc->tag)) { sc->res = NULL; device_printf(dev, "Failed to register AQ handler"); goto fail; } bus_describe_intr(dev, sc->res, sc->tag, "adminq"); return (vectors); fail: /* The VF driver MUST use MSIX */ return (0); } static int ixlv_allocate_pci_resources(struct ixlv_sc *sc) { int rid; device_t dev = sc->dev; rid = PCIR_BAR(0); sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(sc->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem); sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem); sc->osdep.flush_reg = I40E_VFGEN_RSTAT; sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle; sc->hw.back = &sc->osdep; ixl_set_busmaster(dev); ixl_set_msix_enable(dev); /* Disable adminq interrupts (just in case) */ ixlv_disable_adminq_irq(&sc->hw); return (0); } /* * Free MSI-X related resources for a single queue */ static void ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que) { device_t dev = sc->dev; /* ** Release all msix queue resources: */ if (que->tag != NULL) { bus_teardown_intr(dev, que->res, que->tag); que->tag = NULL; } if (que->res != NULL) { int rid = que->msix + 1; bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); que->res = NULL; } if (que->tq != NULL) { taskqueue_free(que->tq); que->tq = NULL; } } static void ixlv_free_pci_resources(struct ixlv_sc *sc) { device_t dev = sc->dev; pci_release_msi(dev); if (sc->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem); if (sc->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->pci_mem); } /* * Create taskqueue and tasklet for Admin Queue interrupts. 
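Stepping back to ixlv_init_msix() above: the queue-count policy there reduces to a chain of clamps. A hypothetical distillation, not driver code (imin() is the int variant of min() from libkern):

static int
ixlv_pick_nqueues(int ncpus, int nvectors, int pf_qpairs, int tunable)
{
	int q = imin(ncpus, nvectors - 1);	/* one vector reserved for AQ */

	q = imin(q, pf_qpairs);			/* PF-assigned queue pairs */
	if (tunable != 0)
		q = (tunable <= q) ? tunable : q; /* oversized tunable ignored */
	else
		q = imin(q, 8);			/* default cap when unset */
	return (q);
}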
*/ static int ixlv_init_taskqueue(struct ixlv_sc *sc) { int error = 0; TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc); sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, taskqueue_thread_enqueue, &sc->tq); taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq", device_get_nameunit(sc->dev)); return (error); } /********************************************************************* * * Setup MSIX Interrupt resources and handlers for the VSI queues * **********************************************************************/ static int ixlv_assign_msix(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi = &sc->vsi; struct ixl_queue *que = vsi->queues; struct tx_ring *txr; int error, rid, vector = 1; #ifdef RSS cpuset_t cpu_mask; #endif for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { int cpu_id = i; rid = vector + 1; txr = &que->txr; que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (que->res == NULL) { device_printf(dev,"Unable to allocate" " bus resource: que interrupt [%d]\n", vector); return (ENXIO); } /* Set the handler function */ error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixlv_msix_que, que, &que->tag); if (error) { que->tag = NULL; device_printf(dev, "Failed to register que handler"); return (error); } bus_describe_intr(dev, que->res, que->tag, "que %d", i); /* Bind the vector to a CPU */ #ifdef RSS cpu_id = rss_getcpu(i % rss_getnumbuckets()); #endif bus_bind_intr(dev, que->res, cpu_id); que->msix = vector; TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); TASK_INIT(&que->task, 0, ixlv_handle_que, que); que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); #ifdef RSS CPU_SETOF(cpu_id, &cpu_mask); taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, &cpu_mask, "%s (bucket %d)", device_get_nameunit(dev), cpu_id); #else taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", device_get_nameunit(dev)); #endif } return (0); } /* + * Special implementation of pause for reset flow because + * there is a lock used. + */ +static void +ixlv_msec_pause(int msecs) +{ + int ticks_to_pause = (msecs * hz) / 1000; + int start_ticks = ticks; + + if (cold || SCHEDULER_STOPPED()) { + i40e_msec_delay(msecs); + return; + } + + while (1) { + kern_yield(PRI_USER); + int yielded_ticks = ticks - start_ticks; + if (yielded_ticks > ticks_to_pause) + break; + else if (yielded_ticks < 0 + && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) { + break; + } + } +} + +/* ** Requests a VF reset from the PF. ** ** Requires the VF's Admin Queue to be initialized. 
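One note on ixlv_msec_pause() above: `ticks` is the kernel's signed, wrapping tick counter, so `ticks - start_ticks` normally stays correct across a wrap, but turns negative once more than INT_MAX ticks elapse; the loop's second break re-bases such a delta by INT_MAX + 1 before comparing. A worked illustration under that assumption:

	int start_ticks = 0;
	int now = INT_MIN + 500;	/* 2^31 + 500 ticks later, wrapped */
	int delta = now - start_ticks;	/* negative */
	/* delta + INT_MAX + 1 == 500 > ticks_to_pause, so the loop exits */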
*/ static int ixlv_reset(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int error = 0; /* Ask the PF to reset us if we are initiating */ if (sc->init_state != IXLV_RESET_PENDING) ixlv_request_reset(sc); - i40e_msec_pause(100); + ixlv_msec_pause(100); error = ixlv_reset_complete(hw); if (error) { device_printf(dev, "%s: VF reset failed\n", __func__); return (error); } error = i40e_shutdown_adminq(hw); if (error) { device_printf(dev, "%s: shutdown_adminq failed: %d\n", __func__, error); return (error); } error = i40e_init_adminq(hw); if (error) { device_printf(dev, "%s: init_adminq failed: %d\n", __func__, error); return(error); } return (0); } static int ixlv_reset_complete(struct i40e_hw *hw) { u32 reg; /* Wait up to ~10 seconds */ for (int i = 0; i < 100; i++) { reg = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; if ((reg == VIRTCHNL_VFR_VFACTIVE) || (reg == VIRTCHNL_VFR_COMPLETED)) return (0); - i40e_msec_pause(100); + ixlv_msec_pause(100); } return (EBUSY); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static int ixlv_setup_interface(device_t dev, struct ixlv_sc *sc) { struct ifnet *ifp; struct ixl_vsi *vsi = &sc->vsi; struct ixl_queue *que = vsi->queues; INIT_DBG_DEV(dev, "begin"); ifp = vsi->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "%s: could not allocate ifnet" " structure!\n", __func__); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; #if __FreeBSD_version >= 1100000 ifp->if_baudrate = IF_Gbps(40); #else if_initbaudrate(ifp, IF_Gbps(40)); #endif ifp->if_init = ixlv_init; ifp->if_softc = vsi; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixlv_ioctl; #if __FreeBSD_version >= 1100000 if_setgetcounterfn(ifp, ixl_get_counter); #endif ifp->if_transmit = ixl_mq_start; ifp->if_qflush = ixl_qflush; ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2; ether_ifattach(ifp, sc->hw.mac.addr); vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN); ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS; ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE; /* * Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; ifp->if_capabilities |= IFCAP_TSO; ifp->if_capabilities |= IFCAP_JUMBO_MTU; ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM | IFCAP_LRO; ifp->if_capenable = ifp->if_capabilities; /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the ixl driver you can ** enable this and get full hardware tag filtering. 
*/ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change, ixlv_media_status); /* Media types based on reported link speed over AdminQ */ ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); INIT_DBG_DEV(dev, "end"); return (0); } /* ** Allocate and setup a single queue */ static int ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que) { device_t dev = sc->dev; struct tx_ring *txr; struct rx_ring *rxr; int rsize, tsize; int error = I40E_SUCCESS; txr = &que->txr; txr->que = que; txr->tail = I40E_QTX_TAIL1(que->me); /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), que->me); mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); /* * Create the TX descriptor ring * * In Head Writeback mode, the descriptor ring is one bigger * than the number of descriptors for space for the HW to * write back index of last completed descriptor. */ if (sc->vsi.enable_head_writeback) { tsize = roundup2((que->num_tx_desc * sizeof(struct i40e_tx_desc)) + sizeof(u32), DBA_ALIGN); } else { tsize = roundup2((que->num_tx_desc * sizeof(struct i40e_tx_desc)), DBA_ALIGN); } if (i40e_allocate_dma_mem(&sc->hw, &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_destroy_tx_mtx; } txr->base = (struct i40e_tx_desc *)txr->dma.va; bzero((void *)txr->base, tsize); /* Now allocate transmit soft structs for the ring */ if (ixl_allocate_tx_data(que)) { device_printf(dev, "Critical Failure setting up TX structures\n"); error = ENOMEM; goto err_free_tx_dma; } /* Allocate a buf ring */ txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF, M_WAITOK, &txr->mtx); if (txr->br == NULL) { device_printf(dev, "Critical Failure setting up TX buf ring\n"); error = ENOMEM; goto err_free_tx_data; } /* * Next the RX queues... */ rsize = roundup2(que->num_rx_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); rxr = &que->rxr; rxr->que = que; rxr->tail = I40E_QRX_TAIL1(que->me); /* Initialize the RX side lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), que->me); mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); if (i40e_allocate_dma_mem(&sc->hw, &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA? 
device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; goto err_destroy_rx_mtx; } rxr->base = (union i40e_rx_desc *)rxr->dma.va; bzero((void *)rxr->base, rsize); /* Allocate receive soft structs for the ring */ if (ixl_allocate_rx_data(que)) { device_printf(dev, "Critical Failure setting up receive structs\n"); error = ENOMEM; goto err_free_rx_dma; } return (0); err_free_rx_dma: i40e_free_dma_mem(&sc->hw, &rxr->dma); err_destroy_rx_mtx: mtx_destroy(&rxr->mtx); /* err_free_tx_buf_ring */ buf_ring_free(txr->br, M_DEVBUF); err_free_tx_data: ixl_free_que_tx(que); err_free_tx_dma: i40e_free_dma_mem(&sc->hw, &txr->dma); err_destroy_tx_mtx: mtx_destroy(&txr->mtx); return (error); } /* ** Allocate and setup the interface queues */ static int ixlv_setup_queues(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi; struct ixl_queue *que; int i; int error = I40E_SUCCESS; vsi = &sc->vsi; vsi->back = (void *)sc; vsi->hw = &sc->hw; vsi->num_vlans = 0; /* Get memory for the station queues */ if (!(vsi->queues = (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); return ENOMEM; } for (i = 0; i < vsi->num_queues; i++) { que = &vsi->queues[i]; que->num_tx_desc = vsi->num_tx_desc; que->num_rx_desc = vsi->num_rx_desc; que->me = i; que->vsi = vsi; if (ixlv_setup_queue(sc, que)) { error = ENOMEM; goto err_free_queues; } } + sysctl_ctx_init(&vsi->sysctl_ctx); return (0); err_free_queues: while (i--) ixlv_free_queue(sc, &vsi->queues[i]); free(vsi->queues, M_DEVBUF); return (error); } /* ** This routine is run via an vlan config EVENT, ** it enables us to use the HW Filter table since ** we can get the vlan id. This just creates the ** entry in the soft version of the VFTA, init will ** repopulate the real table. */ static void ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = arg; struct ixlv_sc *sc = vsi->back; struct ixlv_vlan_filter *v; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; /* Sanity check - make sure it doesn't already exist */ SLIST_FOREACH(v, sc->vlan_filters, next) { if (v->vlan == vtag) return; } mtx_lock(&sc->mtx); ++vsi->num_vlans; v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INSERT_HEAD(sc->vlan_filters, v, next); v->vlan = vtag; v->flags = IXL_FILTER_ADD; ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc); mtx_unlock(&sc->mtx); return; } /* ** This routine is run via an vlan ** unconfig EVENT, remove our entry ** in the soft vfta. */ static void ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = arg; struct ixlv_sc *sc = vsi->back; struct ixlv_vlan_filter *v; int i = 0; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; mtx_lock(&sc->mtx); SLIST_FOREACH(v, sc->vlan_filters, next) { if (v->vlan == vtag) { v->flags = IXL_FILTER_DEL; ++i; --vsi->num_vlans; } } if (i) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd, IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc); mtx_unlock(&sc->mtx); return; } /* ** Get a new filter and add it to the mac filter list. 
*/ static struct ixlv_mac_filter * ixlv_get_mac_filter(struct ixlv_sc *sc) { struct ixlv_mac_filter *f; f = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (f) SLIST_INSERT_HEAD(sc->mac_filters, f, next); return (f); } /* ** Find the filter with matching MAC address */ static struct ixlv_mac_filter * ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr) { struct ixlv_mac_filter *f; bool match = FALSE; SLIST_FOREACH(f, sc->mac_filters, next) { if (cmp_etheraddr(f->macaddr, macaddr)) { match = TRUE; break; } } if (!match) f = NULL; return (f); } static int ixlv_teardown_adminq_msix(struct ixlv_sc *sc) { device_t dev = sc->dev; int error = 0; if (sc->tag != NULL) { bus_teardown_intr(dev, sc->res, sc->tag); if (error) { device_printf(dev, "bus_teardown_intr() for" " interrupt 0 failed\n"); // return (ENXIO); } sc->tag = NULL; } if (sc->res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res); if (error) { device_printf(dev, "bus_release_resource() for" " interrupt 0 failed\n"); // return (ENXIO); } sc->res = NULL; } return (0); } /* ** Admin Queue interrupt handler */ static void ixlv_msix_adminq(void *arg) { struct ixlv_sc *sc = arg; struct i40e_hw *hw = &sc->hw; u32 reg, mask; reg = rd32(hw, I40E_VFINT_ICR01); mask = rd32(hw, I40E_VFINT_ICR0_ENA1); reg = rd32(hw, I40E_VFINT_DYN_CTL01); reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, reg); /* schedule task */ taskqueue_enqueue(sc->tq, &sc->aq_irq); return; } void ixlv_enable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; ixlv_enable_adminq_irq(hw); for (int i = 0; i < vsi->num_queues; i++, que++) ixlv_enable_queue_irq(hw, que->me); } void ixlv_disable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; ixlv_disable_adminq_irq(hw); for (int i = 0; i < vsi->num_queues; i++, que++) ixlv_disable_queue_irq(hw, que->me); } static void ixlv_disable_adminq_irq(struct i40e_hw *hw) { wr32(hw, I40E_VFINT_DYN_CTL01, 0); wr32(hw, I40E_VFINT_ICR0_ENA1, 0); /* flush */ rd32(hw, I40E_VFGEN_RSTAT); return; } static void ixlv_enable_adminq_irq(struct i40e_hw *hw) { wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); /* flush */ rd32(hw, I40E_VFGEN_RSTAT); return; } static void ixlv_enable_queue_irq(struct i40e_hw *hw, int id) { u32 reg; reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK | I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg); } static void ixlv_disable_queue_irq(struct i40e_hw *hw, int id) { wr32(hw, I40E_VFINT_DYN_CTLN1(id), I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); rd32(hw, I40E_VFGEN_RSTAT); return; } /* * Get initial ITR values from tunable values. */ static void ixlv_configure_itr(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; struct ixl_queue *que = vsi->queues; vsi->rx_itr_setting = ixlv_rx_itr; vsi->tx_itr_setting = ixlv_tx_itr; for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; rxr->latency = IXL_AVE_LATENCY; wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; txr->latency = IXL_AVE_LATENCY; } } /* ** Provide an update to the queue RX ** interrupt moderation value. 
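The IRQ helpers above all end with a throwaway register read; that is the standard flush for posted MMIO writes, shown in isolation (a sketch of the existing idiom, not new driver code):

	wr32(hw, I40E_VFINT_DYN_CTL01, 0);	/* write may sit in a PCIe buffer */
	(void)rd32(hw, I40E_VFGEN_RSTAT);	/* read-back forces it to complete */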
*/ static void ixlv_set_queue_rx_itr(struct ixl_queue *que) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; u16 rx_latency = 0; int rx_bytes; /* Idle, do nothing */ if (rxr->bytes == 0) return; if (ixlv_dynamic_rx_itr) { rx_bytes = rxr->bytes/rxr->itr; rx_itr = rxr->itr; /* Adjust latency range */ switch (rxr->latency) { case IXL_LOW_LATENCY: if (rx_bytes > 10) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (rx_bytes > 20) { rx_latency = IXL_BULK_LATENCY; rx_itr = IXL_ITR_8K; } else if (rx_bytes <= 10) { rx_latency = IXL_LOW_LATENCY; rx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (rx_bytes <= 20) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; } rxr->latency = rx_latency; if (rx_itr != rxr->itr) { /* do an exponential smoothing */ rx_itr = (10 * rx_itr * rxr->itr) / ((9 * rx_itr) + rxr->itr); rxr->itr = min(rx_itr, IXL_MAX_ITR); wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, que->me), rxr->itr); } } else { /* We may have toggled to non-dynamic */ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) vsi->rx_itr_setting = ixlv_rx_itr; /* Update the hardware if needed */ if (rxr->itr != vsi->rx_itr_setting) { rxr->itr = vsi->rx_itr_setting; wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, que->me), rxr->itr); } } rxr->bytes = 0; rxr->packets = 0; return; } /* ** Provide an update to the queue TX ** interrupt moderation value. */ static void ixlv_set_queue_tx_itr(struct ixl_queue *que) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; int tx_bytes; /* Idle, do nothing */ if (txr->bytes == 0) return; if (ixlv_dynamic_tx_itr) { tx_bytes = txr->bytes/txr->itr; tx_itr = txr->itr; switch (txr->latency) { case IXL_LOW_LATENCY: if (tx_bytes > 10) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (tx_bytes > 20) { tx_latency = IXL_BULK_LATENCY; tx_itr = IXL_ITR_8K; } else if (tx_bytes <= 10) { tx_latency = IXL_LOW_LATENCY; tx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (tx_bytes <= 20) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; } txr->latency = tx_latency; if (tx_itr != txr->itr) { /* do an exponential smoothing */ tx_itr = (10 * tx_itr * txr->itr) / ((9 * tx_itr) + txr->itr); txr->itr = min(tx_itr, IXL_MAX_ITR); wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, que->me), txr->itr); } } else { /* We may have toggled to non-dynamic */ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) vsi->tx_itr_setting = ixlv_tx_itr; /* Update the hardware if needed */ if (txr->itr != vsi->tx_itr_setting) { txr->itr = vsi->tx_itr_setting; wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, que->me), txr->itr); } } txr->bytes = 0; txr->packets = 0; return; } /* ** ** MSIX Interrupt Handlers and Tasklets ** */ static void ixlv_handle_que(void *context, int pending) { struct ixl_queue *que = context; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ifnet *ifp = vsi->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { more = ixl_rxeof(que, IXL_RX_LIMIT); mtx_lock(&txr->mtx); ixl_txeof(que); if (!drbr_empty(ifp, txr->br)) ixl_mq_start_locked(ifp, txr); mtx_unlock(&txr->mtx); if (more) { taskqueue_enqueue(que->tq, &que->task); return; } } /* Re-enable this interrupt */ ixlv_enable_queue_irq(hw, que->me); return; }
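[Editor's note] The "exponential smoothing" step in both ITR routines above does not jump to the newly chosen moderation setting; it blends the target into the current value so the interval moves only part of the way on each adjustment. A standalone sketch of that update with a worked example (plain C, not driver code; the register values are the IXL_ITR_* constants used above):

#include <stdio.h>

/* new = (10 * target * old) / (9 * target + old), clamped to max,
 * exactly as in ixlv_set_queue_rx_itr/ixlv_set_queue_tx_itr above. */
static unsigned int
smooth_itr(unsigned int target, unsigned int old, unsigned int max)
{
	unsigned int itr = (10 * target * old) / ((9 * target) + old);

	return (itr < max ? itr : max);
}

int
main(void)
{
	/* old = 0x3E (the 8K setting), target = 0x05 (the 100K setting):
	 * (10 * 5 * 62) / (45 + 62) = 3100 / 107 = 28, so one adjustment
	 * moves the interval only partway toward the target. */
	printf("%u\n", smooth_itr(0x05, 0x3E, 0x0FF0));
	return (0);
}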
/********************************************************************* * * MSIX Queue Interrupt Service routine * **********************************************************************/ static void ixlv_msix_que(void *arg) { struct ixl_queue *que = arg; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; bool more_tx, more_rx; /* Spurious interrupts are ignored */ if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; + /* There are drivers which disable auto-masking of interrupts, + * which is a global setting for all ports. We have to make sure + * to mask it to not lose IRQs */ + ixlv_disable_queue_irq(hw, que->me); + ++que->irqs; more_rx = ixl_rxeof(que, IXL_RX_LIMIT); mtx_lock(&txr->mtx); more_tx = ixl_txeof(que); /* ** Make certain that if the stack ** has anything queued the task gets ** scheduled to handle it. */ if (!drbr_empty(vsi->ifp, txr->br)) more_tx = 1; mtx_unlock(&txr->mtx); ixlv_set_queue_rx_itr(que); ixlv_set_queue_tx_itr(que); if (more_tx || more_rx) taskqueue_enqueue(que->tq, &que->task); else ixlv_enable_queue_irq(hw, que->me); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct ixl_vsi *vsi = ifp->if_softc; struct ixlv_sc *sc = vsi->back; INIT_DBG_IF(ifp, "begin"); mtx_lock(&sc->mtx); ixlv_update_link_status(sc); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!sc->link_up) { mtx_unlock(&sc->mtx); INIT_DBG_IF(ifp, "end: link not up"); return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware is always full-duplex */ ifmr->ifm_active |= IFM_FDX; /* Based on the link speed reported by the PF over the AdminQ, choose a * PHY type to report. This isn't 100% correct since we don't really * know the underlying PHY type of the PF, but at least we can report * a valid link speed... */ switch (sc->link_speed) { case VIRTCHNL_LINK_SPEED_100MB: ifmr->ifm_active |= IFM_100_TX; break; case VIRTCHNL_LINK_SPEED_1GB: ifmr->ifm_active |= IFM_1000_T; break; case VIRTCHNL_LINK_SPEED_10GB: ifmr->ifm_active |= IFM_10G_SR; break; case VIRTCHNL_LINK_SPEED_20GB: case VIRTCHNL_LINK_SPEED_25GB: ifmr->ifm_active |= IFM_25G_SR; break; case VIRTCHNL_LINK_SPEED_40GB: ifmr->ifm_active |= IFM_40G_SR4; break; default: ifmr->ifm_active |= IFM_UNKNOWN; break; } mtx_unlock(&sc->mtx); INIT_DBG_IF(ifp, "end"); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediaopt option with ifconfig. * **********************************************************************/ static int ixlv_media_change(struct ifnet * ifp) { struct ixl_vsi *vsi = ifp->if_softc; struct ifmedia *ifm = &vsi->media; INIT_DBG_IF(ifp, "begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(ifp, "Changing speed is not supported\n"); INIT_DBG_IF(ifp, "end"); return (ENODEV); }
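[Editor's note] The hunk added to ixlv_msix_que above masks the vector on entry (because interrupt auto-masking may be disabled globally) and re-arms it only once all work is drained. A generic sketch of that mask-while-servicing discipline; every name here is a hypothetical stand-in, not driver API:

struct irq_ctx {
	int pending;
};

static void hw_mask(struct irq_ctx *c)   { (void)c; /* mask the vector */ }
static void hw_unmask(struct irq_ctx *c) { (void)c; /* re-arm the vector */ }
static void defer(struct irq_ctx *c)     { (void)c; /* hand off to a task */ }

static void
isr(struct irq_ctx *c)
{
	hw_mask(c);		/* no IRQ can be lost while masked */
	if (c->pending)
		defer(c);	/* more work: stay masked, finish in task context */
	else
		hw_unmask(c);	/* fully drained: re-enable the vector */
}

int
main(void)
{
	struct irq_ctx c = { 0 };

	isr(&c);
	return (0);
}

/********************************************************************* * Multicast Initialization * * This routine is called by init to reset a fresh state.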
* **********************************************************************/ static void ixlv_init_multi(struct ixl_vsi *vsi) { struct ixlv_mac_filter *f; struct ixlv_sc *sc = vsi->back; int mcnt = 0; IOCTL_DBG_IF(vsi->ifp, "begin"); /* First clear any multicast filters */ SLIST_FOREACH(f, sc->mac_filters, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { f->flags |= IXL_FILTER_DEL; mcnt++; } } if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(vsi->ifp, "end"); } static void ixlv_add_multi(struct ixl_vsi *vsi) { struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct ixlv_sc *sc = vsi->back; int mcnt = 0; IOCTL_DBG_IF(ifp, "begin"); if_maddr_rlock(ifp); /* ** Get a count, to decide if we ** simply use multicast promiscuous. */ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcnt++; } if_maddr_runlock(ifp); /* TODO: Remove -- cannot set promiscuous mode in a VF */ if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete all multicast filters */ ixlv_init_multi(vsi); sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC; ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, sc); IOCTL_DEBUGOUT("%s: end: too many filters", __func__); return; } mcnt = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (!ixlv_add_mac_filter(sc, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr), IXL_FILTER_MC)) mcnt++; } if_maddr_runlock(ifp); /* ** Notify AQ task that sw filters need to be ** added to hw list */ if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(ifp, "end"); } static void ixlv_del_multi(struct ixl_vsi *vsi) { struct ixlv_mac_filter *f; struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct ixlv_sc *sc = vsi->back; int mcnt = 0; bool match = FALSE; IOCTL_DBG_IF(ifp, "begin"); /* Search for removed multicast addresses */ if_maddr_rlock(ifp); SLIST_FOREACH(f, sc->mac_filters, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { /* check if mac address in filter is in sc's list */ match = FALSE; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); if (cmp_etheraddr(f->macaddr, mc_addr)) { match = TRUE; break; } } /* if this filter is not in the sc's list, remove it */ if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) { f->flags |= IXL_FILTER_DEL; mcnt++; IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); } else if (match == FALSE) IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); } } if_maddr_runlock(ifp); if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(ifp, "end"); } /********************************************************************* * Timer routine * * This routine checks for link status,updates statistics, * and runs the watchdog check. 
* **********************************************************************/ static void ixlv_local_timer(void *arg) { struct ixlv_sc *sc = arg; struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; u32 val; IXLV_CORE_LOCK_ASSERT(sc); /* If Reset is in progress just bail */ if (sc->init_state == IXLV_RESET_PENDING) return; /* Check for when PF triggers a VF reset */ val = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; if (val != VIRTCHNL_VFR_VFACTIVE && val != VIRTCHNL_VFR_COMPLETED) { DDPRINTF(sc->dev, "reset in progress! (%d)", val); return; } ixlv_request_stats(sc); /* clean and process any events */ taskqueue_enqueue(sc->tq, &sc->aq_irq); /* Increment stat when a queue shows hung */ if (ixl_queue_hang_check(vsi)) sc->watchdog_events++; callout_reset(&sc->timer, hz, ixlv_local_timer, sc); } /* ** Note: this routine updates the OS on the link state; ** the real check of the hardware only happens with ** a link interrupt. */ void ixlv_update_link_status(struct ixlv_sc *sc) { struct ixl_vsi *vsi = &sc->vsi; struct ifnet *ifp = vsi->ifp; if (sc->link_up) { if (vsi->link_active == FALSE) { if (bootverbose) if_printf(ifp, "Link is Up, %s\n", ixlv_vc_speed_to_string(sc->link_speed)); vsi->link_active = TRUE; if_link_state_change(ifp, LINK_STATE_UP); } } else { /* Link down */ if (vsi->link_active == TRUE) { if (bootverbose) if_printf(ifp, "Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); vsi->link_active = FALSE; } } return; } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocating TX/RX buffers. * **********************************************************************/ static void ixlv_stop(struct ixlv_sc *sc) { struct ifnet *ifp; int start; ifp = sc->vsi.ifp; INIT_DBG_IF(ifp, "begin"); IXLV_CORE_LOCK_ASSERT(sc); ixl_vc_flush(&sc->vc_mgr); ixlv_disable_queues(sc); start = ticks; while ((ifp->if_drv_flags & IFF_DRV_RUNNING) && ((ticks - start) < hz/10)) ixlv_do_adminq_locked(sc); /* Stop the local timer */ callout_stop(&sc->timer); INIT_DBG_IF(ifp, "end"); } /* Free a single queue struct */ static void ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; if (!mtx_initialized(&txr->mtx)) /* uninitialized */ return; IXL_TX_LOCK(txr); if (txr->br) buf_ring_free(txr->br, M_DEVBUF); ixl_free_que_tx(que); if (txr->base) i40e_free_dma_mem(&sc->hw, &txr->dma); IXL_TX_UNLOCK(txr); IXL_TX_LOCK_DESTROY(txr); if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ return; IXL_RX_LOCK(rxr); ixl_free_que_rx(que); if (rxr->base) i40e_free_dma_mem(&sc->hw, &rxr->dma); IXL_RX_UNLOCK(rxr); IXL_RX_LOCK_DESTROY(rxr); }
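[Editor's note] The VF-reset test in ixlv_local_timer above reads I40E_VFGEN_RSTAT and treats any VFR state other than "VF active" or "reset completed" as a reset in flight. The same check expressed as a helper; this function is a hypothetical sketch (the driver performs the check inline), though rd32() and the register and state constants are the driver's own:

/* Sketch only: compiles in the driver's context, not standalone. */
static bool
ixlv_vf_reset_in_progress(struct i40e_hw *hw)
{
	u32 val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	/* Anything other than active/completed means the PF is
	 * currently resetting this VF. */
	return (val != VIRTCHNL_VFR_VFACTIVE &&
	    val != VIRTCHNL_VFR_COMPLETED);
}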
/********************************************************************* * * Free all station queue structs. * **********************************************************************/ static void ixlv_free_queues(struct ixl_vsi *vsi) { struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back; struct ixl_queue *que = vsi->queues; for (int i = 0; i < vsi->num_queues; i++, que++) { /* First, free the MSI-X resources */ ixlv_free_msix_resources(sc, que); /* Then free other queue data */ ixlv_free_queue(sc, que); } + sysctl_ctx_free(&vsi->sysctl_ctx); free(vsi->queues, M_DEVBUF); } static void ixlv_config_rss_reg(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; u32 lut = 0; u64 set_hena = 0, hena; int i, j, que_id; u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; #ifdef RSS u32 rss_hash_config; #endif /* Don't set up RSS if using a single queue */ if (vsi->num_queues == 1) { wr32(hw, I40E_VFQF_HENA(0), 0); wr32(hw, I40E_VFQF_HENA(1), 0); ixl_flush(hw); return; } #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #else ixl_get_default_rss_key(rss_seed); #endif /* Fill out hash function seed */ for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]); /* Enable PCTYPES for RSS: */ #ifdef RSS rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); #else set_hena = IXL_DEFAULT_RSS_HENA_XL710; #endif hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); hena |= set_hena; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. of queues in round-robin fashion */ for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) { if (j == vsi->num_queues) j = 0; #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) */ que_id = rss_get_indirection_to_bucket(i); que_id = que_id % vsi->num_queues; #else que_id = j; #endif /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK); /* Every fourth index, lut holds 4 entries; write to the register */ if ((i & 3) == 3) { wr32(hw, I40E_VFQF_HLUT(i >> 2), lut); DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut); } } ixl_flush(hw); } static void ixlv_config_rss_pf(struct ixlv_sc *sc) { ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd, IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc); ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd, IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc); ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd, IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc); }
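[Editor's note] The LUT loop in ixlv_config_rss_reg above is easy to misread: each one-byte queue index is shifted into a 32-bit accumulator, and a single register write covers four table entries. A self-contained demonstration in plain C, assuming 64 LUT entries and the 4-bit VF entry mask as above, with the queue count fixed at 4 for the example:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t lut = 0;
	int num_queues = 4;	/* assumed for the example */

	for (int i = 0, j = 0; i < 64; i++, j++) {
		if (j == num_queues)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j & 0xF);
		if ((i & 3) == 3)	/* four entries accumulated */
			printf("HLUT(%2d) = %#010x\n", i >> 2, lut);
	}
	/* First line printed: HLUT( 0) = 0x00010203, i.e. queue
	 * indices 0,1,2,3 packed most-significant-byte first. */
	return (0);
}

/* ** ixlv_config_rss - setup RSS ** ** RSS keys and table are cleared on VF reset.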
*/ static void ixlv_config_rss(struct ixlv_sc *sc) { if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) { DDPRINTF(sc->dev, "Setting up RSS using VF registers..."); ixlv_config_rss_reg(sc); } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { DDPRINTF(sc->dev, "Setting up RSS using messages to PF..."); ixlv_config_rss_pf(sc); } else device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n"); } /* ** This routine refreshes vlan filters, called by init ** it scans the filter table and then updates the AQ */ static void ixlv_setup_vlan_filters(struct ixlv_sc *sc) { struct ixl_vsi *vsi = &sc->vsi; struct ixlv_vlan_filter *f; int cnt = 0; if (vsi->num_vlans == 0) return; /* ** Scan the filter table for vlan entries, ** and if found call for the AQ update. */ SLIST_FOREACH(f, sc->vlan_filters, next) if (f->flags & IXL_FILTER_ADD) cnt++; if (cnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc); } /* ** This routine adds new MAC filters to the sc's list; ** these are later added in hardware by sending a virtual ** channel message. */ static int ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags) { struct ixlv_mac_filter *f; /* Does one already exist? */ f = ixlv_find_mac_filter(sc, macaddr); if (f != NULL) { IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT, MAC_FORMAT_ARGS(macaddr)); return (EEXIST); } /* If not, get a new empty filter */ f = ixlv_get_mac_filter(sc); if (f == NULL) { if_printf(sc->vsi.ifp, "%s: no filters available!!\n", __func__); return (ENOMEM); } IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT, MAC_FORMAT_ARGS(macaddr)); bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); f->flags |= flags; return (0); } /* ** Marks a MAC filter for deletion. 
*/ static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr) { struct ixlv_mac_filter *f; f = ixlv_find_mac_filter(sc, macaddr); if (f == NULL) return (ENOENT); f->flags |= IXL_FILTER_DEL; return (0); } /* ** Tasklet handler for MSIX Adminq interrupts ** - done outside interrupt context since it might sleep */ static void ixlv_do_adminq(void *context, int pending) { struct ixlv_sc *sc = context; mtx_lock(&sc->mtx); ixlv_do_adminq_locked(sc); mtx_unlock(&sc->mtx); return; } static void ixlv_do_adminq_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct i40e_arq_event_info event; struct virtchnl_msg *v_msg; device_t dev = sc->dev; u16 result = 0; u32 reg, oldreg; i40e_status ret; bool aq_error = false; IXLV_CORE_LOCK_ASSERT(sc); event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = sc->aq_buffer; v_msg = (struct virtchnl_msg *)&event.desc; do { ret = i40e_clean_arq_element(hw, &event, &result); if (ret) break; ixlv_vc_completion(sc, v_msg->v_opcode, v_msg->v_retval, event.msg_buf, event.msg_len); if (result != 0) bzero(event.msg_buf, IXL_AQ_BUF_SZ); } while (result); /* check for Admin queue errors */ oldreg = reg = rd32(hw, hw->aq.arq.len); if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) { device_printf(dev, "ARQ VF Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; aq_error = true; } if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) { device_printf(dev, "ARQ Overflow Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; aq_error = true; } if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) { device_printf(dev, "ARQ Critical Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; aq_error = true; } if (oldreg != reg) wr32(hw, hw->aq.arq.len, reg); oldreg = reg = rd32(hw, hw->aq.asq.len); if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) { device_printf(dev, "ASQ VF Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; aq_error = true; } if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) { device_printf(dev, "ASQ Overflow Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; aq_error = true; } if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) { device_printf(dev, "ASQ Critical Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; aq_error = true; } if (oldreg != reg) wr32(hw, hw->aq.asq.len, reg); if (aq_error) { /* Need to reset adapter */ device_printf(dev, "WARNING: Resetting!\n"); sc->init_state = IXLV_RESET_REQUIRED; ixlv_stop(sc); ixlv_init_locked(sc); } ixlv_enable_adminq_irq(hw); } static void ixlv_add_sysctls(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi = &sc->vsi; - struct i40e_eth_stats *es = &vsi->eth_stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); - struct sysctl_oid *vsi_node, *queue_node; - struct sysctl_oid_list *vsi_list, *queue_list; - -#define QUEUE_NAME_LEN 32 - char queue_namebuf[QUEUE_NAME_LEN]; - - struct ixl_queue *queues = vsi->queues; - struct tx_ring *txr; - struct rx_ring *rxr; - /* Driver statistics sysctls */ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq", CTLFLAG_RD, &sc->admin_irq, "Admin Queue IRQ Handled"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size", CTLFLAG_RD, &vsi->num_tx_desc, 0, "TX ring size"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size", CTLFLAG_RD, &vsi->num_rx_desc, 0, "RX ring size"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 
ixlv_sysctl_current_speed, "A", "Current Port Speed"); + ixl_add_sysctls_eth_stats(ctx, child, &vsi->eth_stats); + /* VSI statistics sysctls */ - vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", + vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", CTLFLAG_RD, NULL, "VSI-specific statistics"); - vsi_list = SYSCTL_CHILDREN(vsi_node); - - struct ixl_sysctl_info ctls[] = - { - {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, - {&es->rx_unicast, "ucast_pkts_rcvd", - "Unicast Packets Received"}, - {&es->rx_multicast, "mcast_pkts_rcvd", - "Multicast Packets Received"}, - {&es->rx_broadcast, "bcast_pkts_rcvd", - "Broadcast Packets Received"}, - {&es->rx_discards, "rx_discards", "Discarded RX packets"}, - {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"}, - {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"}, - {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, - {&es->tx_multicast, "mcast_pkts_txd", - "Multicast Packets Transmitted"}, - {&es->tx_broadcast, "bcast_pkts_txd", - "Broadcast Packets Transmitted"}, - {&es->tx_errors, "tx_errors", "TX packet errors"}, - // end - {0,0,0} - }; - struct ixl_sysctl_info *entry = ctls; - while (entry->stat != NULL) - { - SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name, - CTLFLAG_RD, entry->stat, - entry->description); - entry++; - } - - /* Queue sysctls */ - for (int q = 0; q < vsi->num_queues; q++) { - snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); - queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, - CTLFLAG_RD, NULL, "Queue Name"); - queue_list = SYSCTL_CHILDREN(queue_node); - - txr = &(queues[q].txr); - rxr = &(queues[q].rxr); - - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", - CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), - "m_defrag() failed"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped", - CTLFLAG_RD, &(queues[q].dropped_pkts), - "Driver dropped packets"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs", - CTLFLAG_RD, &(queues[q].irqs), - "irqs on this queue"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx", - CTLFLAG_RD, &(queues[q].tso), - "TSO"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed", - CTLFLAG_RD, &(queues[q].tx_dmamap_failed), - "Driver tx dma failure in xmit"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", - CTLFLAG_RD, &(txr->no_desc), - "Queue No Descriptor Available"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets", - CTLFLAG_RD, &(txr->total_packets), - "Queue Packets Transmitted"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes", - CTLFLAG_RD, &(txr->tx_bytes), - "Queue Bytes Transmitted"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", - CTLFLAG_RD, &(rxr->rx_packets), - "Queue Packets Received"); - SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", - CTLFLAG_RD, &(rxr->rx_bytes), - "Queue Bytes Received"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", - CTLFLAG_RD, &(rxr->itr), 0, - "Queue Rx ITR Interval"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", - CTLFLAG_RD, &(txr->itr), 0, - "Queue Tx ITR Interval"); - -#ifdef IXL_DEBUG - /* Examine queue state */ - SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head", - CTLTYPE_UINT | CTLFLAG_RD, &queues[q], - sizeof(struct ixl_queue), - ixlv_sysctl_qtx_tail_handler, "IU", - "Queue Transmit Descriptor Tail"); - SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head", - CTLTYPE_UINT | CTLFLAG_RD, &queues[q], - sizeof(struct ixl_queue), - 
ixlv_sysctl_qrx_tail_handler, "IU", - "Queue Receive Descriptor Tail"); - SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer", - CTLFLAG_RD, &(txr.watchdog_timer), 0, - "Ticks before watchdog event is triggered"); -#endif - } + ixl_vsi_add_queues_stats(vsi); } static void ixlv_init_filters(struct ixlv_sc *sc) { sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INIT(sc->mac_filters); sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INIT(sc->vlan_filters); return; } static void ixlv_free_filters(struct ixlv_sc *sc) { struct ixlv_mac_filter *f; struct ixlv_vlan_filter *v; while (!SLIST_EMPTY(sc->mac_filters)) { f = SLIST_FIRST(sc->mac_filters); SLIST_REMOVE_HEAD(sc->mac_filters, next); free(f, M_DEVBUF); } free(sc->mac_filters, M_DEVBUF); while (!SLIST_EMPTY(sc->vlan_filters)) { v = SLIST_FIRST(sc->vlan_filters); SLIST_REMOVE_HEAD(sc->vlan_filters, next); free(v, M_DEVBUF); } free(sc->vlan_filters, M_DEVBUF); return; } static char * ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed) { int index; char *speeds[] = { "Unknown", "100 Mbps", "1 Gbps", "10 Gbps", "40 Gbps", "20 Gbps", "25 Gbps", }; switch (link_speed) { case VIRTCHNL_LINK_SPEED_100MB: index = 1; break; case VIRTCHNL_LINK_SPEED_1GB: index = 2; break; case VIRTCHNL_LINK_SPEED_10GB: index = 3; break; case VIRTCHNL_LINK_SPEED_40GB: index = 4; break; case VIRTCHNL_LINK_SPEED_20GB: index = 5; break; case VIRTCHNL_LINK_SPEED_25GB: index = 6; break; case VIRTCHNL_LINK_SPEED_UNKNOWN: default: index = 0; break; } return speeds[index]; } static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS) { struct ixlv_sc *sc = (struct ixlv_sc *)arg1; int error = 0; error = sysctl_handle_string(oidp, ixlv_vc_speed_to_string(sc->link_speed), 8, req); return (error); } #ifdef IXL_DEBUG /** * ixlv_sysctl_qtx_tail_handler * Retrieves I40E_QTX_TAIL1 value from hardware * for a sysctl. */ static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS) { struct ixl_queue *que; int error; u32 val; que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; val = rd32(que->vsi->hw, que->txr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /** * ixlv_sysctl_qrx_tail_handler * Retrieves I40E_QRX_TAIL1 value from hardware * for a sysctl. */ static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS) { struct ixl_queue *que; int error; u32 val; que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; val = rd32(que->vsi->hw, que->rxr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } #endif - Index: releng/11.3/sys/dev/ixl/ixl.h =================================================================== --- releng/11.3/sys/dev/ixl/ixl.h (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl.h (revision 349181) @@ -1,709 +1,717 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_H_ #define _IXL_H_ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include "opt_ixl.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #include #endif #include "i40e_type.h" #include "i40e_prototype.h" +#include "i40e_dcb.h" #define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x" #define MAC_FORMAT_ARGS(mac_addr) \ (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \ (mac_addr)[4], (mac_addr)[5] #define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off") #ifdef IXL_DEBUG #define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__) #define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__) #define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__) /* Defines for printing generic debug information */ #define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__) #define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__) #define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__) /* Defines for printing specific debug information */ #define DEBUG_INIT 1 #define DEBUG_IOCTL 1 #define DEBUG_HW 1 #define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__) #define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__) #define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__) #define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__) #define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \ if_printf(ifp, S "\n", ##__VA_ARGS__) #define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__) #define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__) #else /* no IXL_DEBUG */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define DPRINTF(...) #define DDPRINTF(...) #define IDPRINTF(...) #define INIT_DEBUGOUT(...) #define INIT_DBG_DEV(...) #define INIT_DBG_IF(...) #define IOCTL_DEBUGOUT(...) #define IOCTL_DBG_IF2(...) 
#define IOCTL_DBG_IF(...) #define HW_DEBUGOUT(...) #endif /* IXL_DEBUG */ enum ixl_dbg_mask { IXL_DBG_INFO = 0x00000001, IXL_DBG_EN_DIS = 0x00000002, IXL_DBG_AQ = 0x00000004, IXL_DBG_NVMUPD = 0x00000008, IXL_DBG_IOCTL_KNOWN = 0x00000010, IXL_DBG_IOCTL_UNKNOWN = 0x00000020, IXL_DBG_IOCTL_ALL = 0x00000030, I40E_DEBUG_RSS = 0x00000100, IXL_DBG_IOV = 0x00001000, IXL_DBG_IOV_VC = 0x00002000, IXL_DBG_SWITCH_INFO = 0x00010000, IXL_DBG_I2C = 0x00020000, IXL_DBG_ALL = 0xFFFFFFFF }; /* Tunables */ /* * Ring Descriptors Valid Range: 32-4096. Default Value: 1024. This value is the * number of tx/rx descriptors allocated by the driver. Increasing this * value allows the driver to queue more operations. * * Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes. * The driver currently always uses 32 byte Rx descriptors. */ #define IXL_DEFAULT_RING 1024 #define IXL_MAX_RING 4096 #define IXL_MIN_RING 64 #define IXL_RING_INCREMENT 32 #define IXL_AQ_LEN 256 #define IXL_AQ_LEN_MAX 1024 /* ** Default number of entries in Tx queue buf_ring. */ #define DEFAULT_TXBRSZ 4096 /* Alignment for rings */ #define DBA_ALIGN 128 /* * This is the max watchdog interval, i.e. the time that can * pass between any two TX clean operations, which only happen * when the TX hardware is functioning. * * XXX: Watchdog currently counts down in units of (hz) * Set this to just (hz) if you want queues to hang under a little bit of stress */ #define IXL_WATCHDOG (10 * hz) /* * These parameters control when the driver calls the routine to reclaim * transmit descriptors. */ #define IXL_TX_CLEANUP_THRESHOLD (que->num_tx_desc / 8) #define IXL_TX_OP_THRESHOLD (que->num_tx_desc / 32) #define MAX_MULTICAST_ADDR 128 #define IXL_MSIX_BAR 3 #define IXL_ADM_LIMIT 2 #define IXL_TSO_SIZE 65535 #define IXL_AQ_BUF_SZ ((u32) 4096) #define IXL_RX_HDR 128 #define IXL_RX_LIMIT 512 #define IXL_RX_ITR 0 #define IXL_TX_ITR 1 #define IXL_ITR_NONE 3 #define IXL_QUEUE_EOL 0x7FF #define IXL_MAX_FRAME 9728 #define IXL_MAX_TX_SEGS 8 #define IXL_MAX_TSO_SEGS 128 #define IXL_SPARSE_CHAIN 7 #define IXL_QUEUE_HUNG 0x80000000 #define IXL_MIN_TSO_MSS 64 #define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1) #define IXL_RSS_KEY_SIZE_REG 13 #define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4) #define IXL_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */ #define IXL_RSS_VSI_LUT_ENTRY_MASK 0x3F #define IXL_RSS_VF_LUT_ENTRY_MASK 0xF #define IXL_VF_MAX_BUFFER 0x3F80 #define IXL_VF_MAX_HDR_BUFFER 0x840 #define IXL_VF_MAX_FRAME 0x3FFF /* ERJ: hardware can support ~2k (SW5+) filters between all functions */ #define IXL_MAX_FILTERS 256 #define IXL_MAX_TX_BUSY 10 #define IXL_NVM_VERSION_LO_SHIFT 0 #define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT) #define IXL_NVM_VERSION_HI_SHIFT 12 #define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT) /* * Interrupt Moderation parameters * Multiply ITR values by 2 for real ITR value */ #define IXL_MAX_ITR 0x0FF0 #define IXL_ITR_100K 0x0005 #define IXL_ITR_20K 0x0019 #define IXL_ITR_8K 0x003E #define IXL_ITR_4K 0x007A #define IXL_ITR_1K 0x01F4 #define IXL_ITR_DYNAMIC 0x8000 #define IXL_LOW_LATENCY 0 #define IXL_AVE_LATENCY 1 #define IXL_BULK_LATENCY 2 /* MacVlan Flags */ #define IXL_FILTER_USED (u16)(1 << 0) #define IXL_FILTER_VLAN (u16)(1 << 1) #define IXL_FILTER_ADD (u16)(1 << 2) #define IXL_FILTER_DEL (u16)(1 << 3) #define IXL_FILTER_MC (u16)(1 << 4) /* used in the vlan field of the filter when not a vlan */ #define IXL_VLAN_ANY -1 #define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
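[Editor's note] To make the interrupt-moderation comment above concrete ("multiply ITR values by 2 for real ITR value"): per that comment the hardware interval is the register value times 2, in microsecond units, so a setting's interrupt rate works out to 1000000 / (2 * reg) per second. A quick standalone check of the values defined above (the 2-microsecond unit is inferred from the comment, not stated elsewhere in this file):

#include <stdio.h>

int
main(void)
{
	unsigned int regs[] = { 0x0005, 0x0019, 0x003E, 0x007A };

	/* 0x0005 -> 10 us  -> 100000/s (IXL_ITR_100K)
	 * 0x0019 -> 50 us  -> 20000/s  (IXL_ITR_20K)
	 * 0x003E -> 124 us -> ~8064/s  (IXL_ITR_8K)
	 * 0x007A -> 244 us -> ~4098/s  (IXL_ITR_4K) */
	for (int i = 0; i < 4; i++)
		printf("%#06x -> %u us -> %u ints/s\n", regs[i],
		    2 * regs[i], 1000000 / (2 * regs[i]));
	return (0);
}

#define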
CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6) #define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO) /* Misc flags for ixl_vsi.flags */ #define IXL_FLAGS_KEEP_TSO4 (1 << 0) #define IXL_FLAGS_KEEP_TSO6 (1 << 1) #define IXL_FLAGS_USES_MSIX (1 << 2) #define IXL_FLAGS_IS_VF (1 << 3) +#define IXL_VSI_IS_PF(v) ((v->flags & IXL_FLAGS_IS_VF) == 0) +#define IXL_VSI_IS_VF(v) ((v->flags & IXL_FLAGS_IS_VF) != 0) + #define IXL_VF_RESET_TIMEOUT 100 #define IXL_VSI_DATA_PORT 0x01 #define IXLV_MAX_QUEUES 16 #define IXL_MAX_VSI_QUEUES (2 * (I40E_VSILAN_QTABLE_MAX_INDEX + 1)) #define IXL_RX_CTX_BASE_UNITS 128 #define IXL_TX_CTX_BASE_UNITS 128 #define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \ I40E_VPINT_LNKLSTN(((vector) - 1) + \ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) #define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \ I40E_VFINT_DYN_CTLN(((vector) - 1) + \ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) #define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA #define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK 0x20 #define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf) ((glb_vf) / 32) #define IXL_GLGEN_VFLRSTAT_MASK(glb_vf) (1 << ((glb_vf) % 32)) #define IXL_MAX_ITR_IDX 3 #define IXL_END_OF_INTR_LNKLST 0x7FF #define IXL_DEFAULT_RSS_HENA_BASE (\ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) #define IXL_DEFAULT_RSS_HENA_XL710 IXL_DEFAULT_RSS_HENA_BASE #define IXL_DEFAULT_RSS_HENA_X722 (\ IXL_DEFAULT_RSS_HENA_BASE | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) #define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) #define IXL_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx) #define IXL_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) #define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) /* Pre-11 counter(9) compatibility */ #if __FreeBSD_version >= 1100036 #define IXL_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count) #define IXL_SET_IERRORS(vsi, count) (vsi)->ierrors = (count) #define IXL_SET_OPACKETS(vsi, count) (vsi)->opackets = (count) #define IXL_SET_OERRORS(vsi, count) (vsi)->oerrors = (count) #define IXL_SET_COLLISIONS(vsi, count) /* Do nothing; collisions is always 0. 
*/ #define IXL_SET_IBYTES(vsi, count) (vsi)->ibytes = (count) #define IXL_SET_OBYTES(vsi, count) (vsi)->obytes = (count) #define IXL_SET_IMCASTS(vsi, count) (vsi)->imcasts = (count) #define IXL_SET_OMCASTS(vsi, count) (vsi)->omcasts = (count) #define IXL_SET_IQDROPS(vsi, count) (vsi)->iqdrops = (count) #define IXL_SET_OQDROPS(vsi, count) (vsi)->oqdrops = (count) #define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count) #else #define IXL_SET_IPACKETS(vsi, count) (vsi)->ifp->if_ipackets = (count) #define IXL_SET_IERRORS(vsi, count) (vsi)->ifp->if_ierrors = (count) #define IXL_SET_OPACKETS(vsi, count) (vsi)->ifp->if_opackets = (count) #define IXL_SET_OERRORS(vsi, count) (vsi)->ifp->if_oerrors = (count) #define IXL_SET_COLLISIONS(vsi, count) (vsi)->ifp->if_collisions = (count) #define IXL_SET_IBYTES(vsi, count) (vsi)->ifp->if_ibytes = (count) #define IXL_SET_OBYTES(vsi, count) (vsi)->ifp->if_obytes = (count) #define IXL_SET_IMCASTS(vsi, count) (vsi)->ifp->if_imcasts = (count) #define IXL_SET_OMCASTS(vsi, count) (vsi)->ifp->if_omcasts = (count) #define IXL_SET_IQDROPS(vsi, count) (vsi)->ifp->if_iqdrops = (count) #define IXL_SET_OQDROPS(vsi, odrops) (vsi)->ifp->if_snd.ifq_drops = (odrops) #define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count) #endif +/* For stats sysctl naming */ +#define IXL_QUEUE_NAME_LEN 32 + /* ***************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * ***************************************************************************** */ typedef struct _ixl_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } ixl_vendor_info_t; struct ixl_tx_buf { u32 eop_index; struct mbuf *m_head; bus_dmamap_t map; bus_dma_tag_t tag; }; struct ixl_rx_buf { struct mbuf *m_head; struct mbuf *m_pack; struct mbuf *fmp; bus_dmamap_t hmap; bus_dmamap_t pmap; }; /* ** This struct has multiple uses, multicast ** addresses, vlans, and mac filters all use it. */ struct ixl_mac_filter { SLIST_ENTRY(ixl_mac_filter) next; u8 macaddr[ETHER_ADDR_LEN]; s16 vlan; u16 flags; }; /* * The Transmit ring control struct */ struct tx_ring { struct ixl_queue *que; struct mtx mtx; u32 tail; struct i40e_tx_desc *base; struct i40e_dma_mem dma; u16 next_avail; u16 next_to_clean; u16 atr_rate; u16 atr_count; u32 itr; u32 latency; struct ixl_tx_buf *buffers; volatile u16 avail; u32 cmd; bus_dma_tag_t tx_tag; bus_dma_tag_t tso_tag; char mtx_name[16]; struct buf_ring *br; s32 watchdog_timer; /* Used for Dynamic ITR calculation */ u32 packets; - u32 bytes; + u32 bytes; /* Soft Stats */ u64 tx_bytes; u64 no_desc; u64 total_packets; }; /* * The Receive ring control struct */ struct rx_ring { struct ixl_queue *que; struct mtx mtx; union i40e_rx_desc *base; struct i40e_dma_mem dma; struct lro_ctrl lro; bool lro_enabled; bool hdr_split; bool discard; u32 next_refresh; - u32 next_check; + u32 next_check; u32 itr; u32 latency; char mtx_name[16]; struct ixl_rx_buf *buffers; u32 mbuf_sz; u32 tail; bus_dma_tag_t htag; bus_dma_tag_t ptag; /* Used for Dynamic ITR calculation */ u32 packets; - u32 bytes; + u32 bytes; /* Soft stats */ u64 split; u64 rx_packets; - u64 rx_bytes; - u64 desc_errs; - u64 not_done; + u64 rx_bytes; + u64 desc_errs; + u64 not_done; }; /* ** Driver queue struct: this is the interrupt container ** for the associated tx and rx ring pair. 
*/ struct ixl_queue { struct ixl_vsi *vsi; u32 me; u32 msix; /* This queue's MSIX vector */ u32 eims; /* This queue's EIMS bit */ struct resource *res; void *tag; int num_tx_desc; /* both tx and rx */ int num_rx_desc; /* both tx and rx */ struct tx_ring txr; struct rx_ring rxr; struct task task; struct task tx_task; struct taskqueue *tq; /* Queue stats */ u64 irqs; u64 tso; u64 mbuf_defrag_failed; u64 mbuf_hdr_failed; u64 mbuf_pkt_failed; u64 tx_dmamap_failed; u64 dropped_pkts; u64 mss_too_small; }; /* ** Virtual Station Interface */ SLIST_HEAD(ixl_ftl_head, ixl_mac_filter); struct ixl_vsi { - void *back; + void *back; struct ifnet *ifp; struct device *dev; struct i40e_hw *hw; struct ifmedia media; enum i40e_vsi_type type; int id; u16 num_queues; int num_tx_desc; int num_rx_desc; u32 rx_itr_setting; u32 tx_itr_setting; u16 max_frame_size; bool enable_head_writeback; struct ixl_queue *queues; /* head of queues */ u16 vsi_num; bool link_active; u16 seid; u16 uplink_seid; u16 downlink_seid; /* MAC/VLAN Filter list */ struct ixl_ftl_head ftl; u16 num_macs; + u64 num_hw_filters; /* Contains readylist & stat counter id */ struct i40e_aqc_vsi_properties_data info; - eventhandler_tag vlan_attach; - eventhandler_tag vlan_detach; + eventhandler_tag vlan_attach; + eventhandler_tag vlan_detach; u16 num_vlans; /* Per-VSI stats from hardware */ struct i40e_eth_stats eth_stats; struct i40e_eth_stats eth_stats_offsets; - bool stat_offsets_loaded; + bool stat_offsets_loaded; /* VSI stat counters */ u64 ipackets; u64 ierrors; u64 opackets; u64 oerrors; u64 ibytes; u64 obytes; u64 imcasts; u64 omcasts; u64 iqdrops; u64 oqdrops; u64 noproto; - /* Driver statistics */ - u64 hw_filters_del; - u64 hw_filters_add; - /* Misc. */ - u64 flags; + u64 flags; struct sysctl_oid *vsi_node; + struct sysctl_ctx_list sysctl_ctx; }; /* ** Find the number of unrefreshed RX descriptors */ static inline u16 ixl_rx_unrefreshed(struct ixl_queue *que) -{ +{ struct rx_ring *rxr = &que->rxr; - + if (rxr->next_check > rxr->next_refresh) return (rxr->next_check - rxr->next_refresh - 1); else return ((que->num_rx_desc + rxr->next_check) - rxr->next_refresh - 1); } /* ** Find the next available unused filter */ static inline struct ixl_mac_filter * ixl_get_filter(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; /* create a new empty filter */ f = malloc(sizeof(struct ixl_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (f) SLIST_INSERT_HEAD(&vsi->ftl, f, next); return (f); } /* ** Compare two ethernet addresses */ static inline bool cmp_etheraddr(const u8 *ea1, const u8 *ea2) -{ +{ bool cmp = FALSE; if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) && (ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) && - (ea1[4] == ea2[4]) && (ea1[5] == ea2[5])) + (ea1[4] == ea2[4]) && (ea1[5] == ea2[5])) cmp = TRUE; return (cmp); -} +} /* * Return next largest power of 2, unsigned * * Public domain, from Bit Twiddling Hacks */ static inline u32 next_power_of_two(u32 n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; /* Next power of two > 0 is 1 */ n += (n == 0); return (n); } /* * Info for stats sysctls */ struct ixl_sysctl_info { u64 *stat; char *name; char *description; }; extern const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN]; /********************************************************************* * TXRX Function prototypes *********************************************************************/ int ixl_allocate_tx_data(struct ixl_queue *); int ixl_allocate_rx_data(struct ixl_queue *); void ixl_init_tx_ring(struct ixl_queue *); int 
ixl_init_rx_ring(struct ixl_queue *); bool ixl_rxeof(struct ixl_queue *, int); bool ixl_txeof(struct ixl_queue *); void ixl_free_que_tx(struct ixl_queue *); void ixl_free_que_rx(struct ixl_queue *); int ixl_mq_start(struct ifnet *, struct mbuf *); int ixl_mq_start_locked(struct ifnet *, struct tx_ring *); void ixl_deferred_mq_start(void *, int); +void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, + struct i40e_eth_stats *); +void ixl_vsi_add_queues_stats(struct ixl_vsi *); void ixl_vsi_setup_rings_size(struct ixl_vsi *, int, int); int ixl_queue_hang_check(struct ixl_vsi *); void ixl_free_vsi(struct ixl_vsi *); void ixl_qflush(struct ifnet *); /* Common function prototypes between PF/VF driver */ #if __FreeBSD_version >= 1100000 uint64_t ixl_get_counter(if_t ifp, ift_counter cnt); #endif void ixl_get_default_rss_key(u32 *); const char * i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err); void ixl_set_busmaster(device_t); void ixl_set_msix_enable(device_t); #endif /* _IXL_H_ */ Index: releng/11.3/sys/dev/ixl/ixl_iw.c =================================================================== --- releng/11.3/sys/dev/ixl/ixl_iw.c (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_iw.c (revision 349181) @@ -1,490 +1,490 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixl.h" #include "ixl_pf.h" #include "ixl_iw.h" #include "ixl_iw_int.h" #ifdef IXL_IW #define IXL_IW_VEC_BASE(pf) ((pf)->msix - (pf)->iw_msix) #define IXL_IW_VEC_COUNT(pf) ((pf)->iw_msix) #define IXL_IW_VEC_LIMIT(pf) ((pf)->msix) extern int ixl_enable_iwarp; static struct ixl_iw_state ixl_iw; static int ixl_iw_ref_cnt; static void ixl_iw_pf_msix_reset(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg; int vec; for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) { reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK; wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg); } return; } static void ixl_iw_invoke_op(void *context, int pending) { struct ixl_iw_pf_entry *pf_entry = (struct ixl_iw_pf_entry *)context; struct ixl_iw_pf info; bool initialize; int err; INIT_DEBUGOUT("begin"); mtx_lock(&ixl_iw.mtx); if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) && (pf_entry->state.iw_current == IXL_IW_PF_STATE_OFF)) initialize = true; else if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_OFF) && (pf_entry->state.iw_current == IXL_IW_PF_STATE_ON)) initialize = false; else { /* nothing to be done, so finish here */ mtx_unlock(&ixl_iw.mtx); return; } info = pf_entry->pf_info; mtx_unlock(&ixl_iw.mtx); if (initialize) { err = ixl_iw.ops->init(&info); if (err) device_printf(pf_entry->pf->dev, "%s: failed to initialize iwarp (err %d)\n", __func__, err); else pf_entry->state.iw_current = IXL_IW_PF_STATE_ON; } else { err = ixl_iw.ops->stop(&info); if (err) device_printf(pf_entry->pf->dev, "%s: failed to stop iwarp (err %d)\n", __func__, err); else { ixl_iw_pf_msix_reset(pf_entry->pf); pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF; } } return; } static void ixl_iw_uninit(void) { INIT_DEBUGOUT("begin"); mtx_destroy(&ixl_iw.mtx); return; } static void ixl_iw_init(void) { INIT_DEBUGOUT("begin"); LIST_INIT(&ixl_iw.pfs); mtx_init(&ixl_iw.mtx, "ixl_iw_pfs", NULL, MTX_DEF); ixl_iw.registered = false; return; } /****************************************************************************** * if_ixl internal API *****************************************************************************/ int ixl_iw_pf_init(struct ixl_pf *pf) { struct ixl_iw_pf_entry *pf_entry; struct ixl_iw_pf *pf_info; int err = 0; INIT_DEBUGOUT("begin"); mtx_lock(&ixl_iw.mtx); LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) if (pf_entry->pf == pf) break; if (pf_entry == NULL) { /* attempt to initialize PF not yet attached - sth is wrong */ device_printf(pf->dev, "%s: PF not found\n", __func__); err = ENOENT; goto out; } pf_info = &pf_entry->pf_info; pf_info->handle = (void *)pf; pf_info->ifp = pf->vsi.ifp; pf_info->dev = pf->dev; pf_info->pci_mem = pf->pci_mem; pf_info->pf_id = pf->hw.pf_id; pf_info->mtu = pf->vsi.ifp->if_mtu; pf_info->iw_msix.count = IXL_IW_VEC_COUNT(pf); pf_info->iw_msix.base = IXL_IW_VEC_BASE(pf); for (int i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++) pf_info->qs_handle[i] = le16_to_cpu(pf->vsi.info.qs_handle[0]); pf_entry->state.pf = IXL_IW_PF_STATE_ON; if (ixl_iw.registered) { pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON; taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task); } out: mtx_unlock(&ixl_iw.mtx); return (err); } void ixl_iw_pf_stop(struct ixl_pf *pf) { struct ixl_iw_pf_entry *pf_entry; INIT_DEBUGOUT("begin"); mtx_lock(&ixl_iw.mtx); LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) if (pf_entry->pf == pf) break; if (pf_entry == NULL) { /* attempt to stop PF which has not been attached - sth is wrong */ 
device_printf(pf->dev, "%s: PF not found\n", __func__); goto out; } pf_entry->state.pf = IXL_IW_PF_STATE_OFF; if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) { pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF; if (ixl_iw.registered) taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task); } out: mtx_unlock(&ixl_iw.mtx); return; } int ixl_iw_pf_attach(struct ixl_pf *pf) { struct ixl_iw_pf_entry *pf_entry; int err = 0; INIT_DEBUGOUT("begin"); if (ixl_iw_ref_cnt == 0) ixl_iw_init(); mtx_lock(&ixl_iw.mtx); LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) if (pf_entry->pf == pf) { device_printf(pf->dev, "%s: PF already exists\n", __func__); err = EEXIST; goto out; } pf_entry = malloc(sizeof(struct ixl_iw_pf_entry), M_DEVBUF, M_NOWAIT | M_ZERO); if (pf_entry == NULL) { device_printf(pf->dev, "%s: failed to allocate memory to attach new PF\n", __func__); err = ENOMEM; goto out; } pf_entry->pf = pf; pf_entry->state.pf = IXL_IW_PF_STATE_OFF; pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF; pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF; LIST_INSERT_HEAD(&ixl_iw.pfs, pf_entry, node); ixl_iw_ref_cnt++; TASK_INIT(&pf_entry->iw_task, 0, ixl_iw_invoke_op, pf_entry); out: mtx_unlock(&ixl_iw.mtx); return (err); } int ixl_iw_pf_detach(struct ixl_pf *pf) { struct ixl_iw_pf_entry *pf_entry; int err = 0; INIT_DEBUGOUT("begin"); mtx_lock(&ixl_iw.mtx); LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) if (pf_entry->pf == pf) break; if (pf_entry == NULL) { /* attempt to stop PF which has not been attached - sth is wrong */ device_printf(pf->dev, "%s: PF not found\n", __func__); err = ENOENT; goto out; } if (pf_entry->state.pf != IXL_IW_PF_STATE_OFF) { /* attempt to detach PF which has not yet been stopped - sth is wrong */ device_printf(pf->dev, "%s: failed - PF is still active\n", __func__); err = EBUSY; goto out; } LIST_REMOVE(pf_entry, node); free(pf_entry, M_DEVBUF); ixl_iw_ref_cnt--; out: mtx_unlock(&ixl_iw.mtx); if (ixl_iw_ref_cnt == 0) ixl_iw_uninit(); return (err); } /****************************************************************************** * API exposed to iw_ixl module *****************************************************************************/ int ixl_iw_pf_reset(void *pf_handle) { struct ixl_pf *pf = (struct ixl_pf *)pf_handle; INIT_DEBUGOUT("begin"); IXL_PF_LOCK(pf); ixl_init_locked(pf); IXL_PF_UNLOCK(pf); return (0); } int ixl_iw_pf_msix_init(void *pf_handle, struct ixl_iw_msix_mapping *msix_info) { struct ixl_pf *pf = (struct ixl_pf *)pf_handle; struct i40e_hw *hw = &pf->hw; u32 reg; int vec, i; INIT_DEBUGOUT("begin"); if ((msix_info->aeq_vector < IXL_IW_VEC_BASE(pf)) || (msix_info->aeq_vector >= IXL_IW_VEC_LIMIT(pf))) { printf("%s: invalid MSIX vector (%i) for AEQ\n", __func__, msix_info->aeq_vector); return (EINVAL); } reg = I40E_PFINT_AEQCTL_CAUSE_ENA_MASK | (msix_info->aeq_vector << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) | (msix_info->itr_indx << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_AEQCTL, reg); for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) { for (i = 0; i < msix_info->ceq_cnt; i++) if (msix_info->ceq_vector[i] == vec) break; if (i == msix_info->ceq_cnt) { /* this vector has no CEQ mapped */ reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK; wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg); } else { reg = (i & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) | (I40E_QUEUE_TYPE_PE_CEQ << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg); reg = I40E_PFINT_CEQCTL_CAUSE_ENA_MASK | (vec << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) | (msix_info->itr_indx << 
I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) | (IXL_QUEUE_EOL << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_PFINT_CEQCTL(i), reg); } } return (0); } int ixl_iw_register(struct ixl_iw_ops *ops) { struct ixl_iw_pf_entry *pf_entry; int err = 0; int iwarp_cap_on_pfs = 0; INIT_DEBUGOUT("begin"); LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp; if (!iwarp_cap_on_pfs && ixl_enable_iwarp) { printf("%s: the device is not iwarp-capable, registering dropped\n", __func__); return (ENODEV); } if (ixl_enable_iwarp == 0) { printf("%s: enable_iwarp is off, registering dropped\n", __func__); return (EACCES); } if ((ops->init == NULL) || (ops->stop == NULL)) { printf("%s: invalid iwarp driver ops\n", __func__); return (EINVAL); } mtx_lock(&ixl_iw.mtx); if (ixl_iw.registered) { printf("%s: iwarp driver already registered\n", __func__); err = (EBUSY); goto out; } ixl_iw.registered = true; mtx_unlock(&ixl_iw.mtx); ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT, taskqueue_thread_enqueue, &ixl_iw.tq); if (ixl_iw.tq == NULL) { printf("%s: failed to create queue\n", __func__); ixl_iw.registered = false; return (ENOMEM); } taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw"); ixl_iw.ops = malloc(sizeof(struct ixl_iw_ops), M_DEVBUF, M_NOWAIT | M_ZERO); if (ixl_iw.ops == NULL) { printf("%s: failed to allocate memory\n", __func__); taskqueue_free(ixl_iw.tq); ixl_iw.registered = false; return (ENOMEM); } ixl_iw.ops->init = ops->init; ixl_iw.ops->stop = ops->stop; mtx_lock(&ixl_iw.mtx); LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) { pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON; taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task); } out: mtx_unlock(&ixl_iw.mtx); return (err); } int ixl_iw_unregister(void) { struct ixl_iw_pf_entry *pf_entry; int iwarp_cap_on_pfs = 0; INIT_DEBUGOUT("begin"); LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp; if (!iwarp_cap_on_pfs && ixl_enable_iwarp) { printf("%s: attempt to unregister driver when no iwarp-capable device present\n", __func__); return (ENODEV); } if (ixl_enable_iwarp == 0) { printf("%s: attempt to unregister driver when enable_iwarp is off\n", __func__); return (ENODEV); } mtx_lock(&ixl_iw.mtx); if (!ixl_iw.registered) { printf("%s: failed - iwarp driver has not been registered\n", __func__); mtx_unlock(&ixl_iw.mtx); return (ENOENT); } LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) { pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF; taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task); } ixl_iw.registered = false; mtx_unlock(&ixl_iw.mtx); LIST_FOREACH(pf_entry, &ixl_iw.pfs, node) taskqueue_drain(ixl_iw.tq, &pf_entry->iw_task); taskqueue_free(ixl_iw.tq); ixl_iw.tq = NULL; free(ixl_iw.ops, M_DEVBUF); ixl_iw.ops = NULL; return (0); } #endif /* IXL_IW */ Index: releng/11.3/sys/dev/ixl/ixl_iw.h =================================================================== --- releng/11.3/sys/dev/ixl/ixl_iw.h (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_iw.h (revision 349181) @@ -1,75 +1,75 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_IW_H_ #define _IXL_IW_H_ #define IXL_IW_MAX_USER_PRIORITY 8 #define IXL_IW_MAX_MSIX 64 struct ixl_iw_msix_mapping { u8 itr_indx; int aeq_vector; int ceq_cnt; int *ceq_vector; }; struct ixl_iw_msix { int base; int count; }; struct ixl_iw_pf { void *handle; struct ifnet *ifp; device_t dev; struct resource *pci_mem; u8 pf_id; u16 mtu; struct ixl_iw_msix iw_msix; u16 qs_handle[IXL_IW_MAX_USER_PRIORITY]; }; struct ixl_iw_ops { int (*init)(struct ixl_iw_pf *pf_info); int (*stop)(struct ixl_iw_pf *pf_info); }; int ixl_iw_pf_reset(void *pf_handle); int ixl_iw_pf_msix_init(void *pf_handle, struct ixl_iw_msix_mapping *msix_info); int ixl_iw_register(struct ixl_iw_ops *iw_ops); int ixl_iw_unregister(void); #endif /* _IXL_IW_H_ */ Index: releng/11.3/sys/dev/ixl/ixl_iw_int.h =================================================================== --- releng/11.3/sys/dev/ixl/ixl_iw_int.h (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_iw_int.h (revision 349181) @@ -1,71 +1,71 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_IW_INT_H_ #define _IXL_IW_INT_H_ enum ixl_iw_pf_state { IXL_IW_PF_STATE_OFF, IXL_IW_PF_STATE_ON }; struct ixl_iw_pf_entry_state { enum ixl_iw_pf_state pf; enum ixl_iw_pf_state iw_scheduled; enum ixl_iw_pf_state iw_current; }; struct ixl_iw_pf_entry { LIST_ENTRY(ixl_iw_pf_entry) node; struct ixl_pf *pf; struct ixl_iw_pf_entry_state state; struct ixl_iw_pf pf_info; struct task iw_task; }; LIST_HEAD(ixl_iw_pfs_head, ixl_iw_pf_entry); struct ixl_iw_state { struct ixl_iw_ops *ops; bool registered; struct ixl_iw_pfs_head pfs; struct mtx mtx; struct taskqueue *tq; }; int ixl_iw_pf_init(struct ixl_pf *pf); void ixl_iw_pf_stop(struct ixl_pf *pf); int ixl_iw_pf_attach(struct ixl_pf *pf); int ixl_iw_pf_detach(struct ixl_pf *pf); #endif /* _IXL_IW_INT_H_ */ Index: releng/11.3/sys/dev/ixl/ixl_pf.h =================================================================== --- releng/11.3/sys/dev/ixl/ixl_pf.h (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_pf.h (revision 349181) @@ -1,363 +1,396 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_PF_H_ #define _IXL_PF_H_ #include "ixl.h" #include "ixl_pf_qmgr.h" #define VF_FLAG_ENABLED 0x01 #define VF_FLAG_SET_MAC_CAP 0x02 #define VF_FLAG_VLAN_CAP 0x04 #define VF_FLAG_PROMISC_CAP 0x08 #define VF_FLAG_MAC_ANTI_SPOOF 0x10 #define IXL_PF_STATE_EMPR_RESETTING (1 << 0) #define IXL_PF_STATE_FW_LLDP_DISABLED (1 << 1) +#define IXL_PF_STATE_RECOVERY_MODE (1 << 2) +#define IXL_PF_STATE_EEE_ENABLED (1 << 3) +enum ixl_i2c_access_method_t { + IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE = 0, + IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS = 1, + IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD = 2, + IXL_I2C_ACCESS_METHOD_AQ = 3, + IXL_I2C_ACCESS_METHOD_TYPE_LENGTH = 4 +}; + struct ixl_vf { struct ixl_vsi vsi; uint32_t vf_flags; uint8_t mac[ETHER_ADDR_LEN]; uint16_t vf_num; uint32_t version; struct ixl_pf_qtag qtag; - struct sysctl_ctx_list ctx; }; /* Physical controller structure */ struct ixl_pf { struct i40e_hw hw; struct i40e_osdep osdep; struct device *dev; struct ixl_vsi vsi; struct resource *pci_mem; struct resource *msix_mem; /* * Interrupt resources: this set is * either used for legacy, or for Link * when doing MSIX */ void *tag; struct resource *res; struct callout timer; int msix; #ifdef IXL_IW int iw_msix; bool iw_enabled; #endif int if_flags; - int state; + volatile int state; bool init_in_progress; u8 supported_speeds; struct ixl_pf_qmgr qmgr; struct ixl_pf_qtag qtag; /* Tunable values */ bool enable_msix; int max_queues; bool enable_tx_fc_filter; int dynamic_rx_itr; int dynamic_tx_itr; int tx_itr; int rx_itr; struct mtx pf_mtx; - u32 qbase; u32 admvec; struct task adminq; struct taskqueue *tq; bool link_up; u32 link_speed; int advertised_speed; int fc; /* link flow ctrl setting */ enum ixl_dbg_mask dbg_mask; bool has_i2c; /* Misc stats maintained by the driver */ u64 watchdog_events; u64 admin_irq; /* Statistics from hw */ struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats_offsets; bool stat_offsets_loaded; + /* I2C access methods */ + enum ixl_i2c_access_method_t i2c_access_method; + s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 *data); + s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 data); + /* SR-IOV */ struct ixl_vf *vfs; int num_vfs; uint16_t veb_seid; struct task vflr_task; int vc_debug_lvl; }; + /* * Defines used for NVM update ioctls. * This value is used in the Solaris tool, too. */ #define I40E_NVM_ACCESS \ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5) #define IXL_DEFAULT_PHY_INT_MASK \ ((~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL \ | I40E_AQ_EVENT_MEDIA_NA)) & 0x3FF) /*** Sysctl help messages; displayed with "sysctl -d" ***/ #define IXL_SYSCTL_HELP_SET_ADVERTISE \ "\nControl advertised link speed.\n" \ "Flags:\n" \ "\t 0x1 - advertise 100M\n" \ "\t 0x2 - advertise 1G\n" \ "\t 0x4 - advertise 10G\n" \ "\t 0x8 - advertise 20G\n" \ "\t0x10 - advertise 25G\n" \ -"\t0x20 - advertise 40G\n\n" \ +"\t0x20 - advertise 40G\n" \ +"\t0x40 - advertise 2.5G\n" \ +"\t0x80 - advertise 5G\n\n" \ "Set to 0 to disable link.\n" \ "Use \"sysctl -x\" to view flags properly." #define IXL_SYSCTL_HELP_SUPPORTED_SPEED \ "\nSupported link speeds.\n" \ "Flags:\n" \ "\t 0x1 - 100M\n" \ "\t 0x2 - 1G\n" \ "\t 0x4 - 10G\n" \ "\t 0x8 - 20G\n" \ "\t0x10 - 25G\n" \ -"\t0x20 - 40G\n\n" \ +"\t0x20 - 40G\n" \ +"\t0x40 - 2.5G\n" \ +"\t0x80 - 5G\n\n" \ "Use \"sysctl -x\" to view flags properly." 
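The advertise and supported-speed help strings above document single-bit flags, so a handler can decode a mask one bit at a time. A minimal decoding sketch (illustrative only; the helper name below is not part of the driver):

    /* Illustrative: map one speed flag bit, as documented above, to a label. */
    static const char *
    ixl_speed_flag_str(u32 flag)
    {
        switch (flag) {
        case 0x01: return ("100M");
        case 0x02: return ("1G");
        case 0x04: return ("10G");
        case 0x08: return ("20G");
        case 0x10: return ("25G");
        case 0x20: return ("40G");
        case 0x40: return ("2.5G");
        case 0x80: return ("5G");
        default:   return ("unknown");
        }
    }
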
#define IXL_SYSCTL_HELP_FC \ "\nSet flow control mode using the values below.\n" \ "\t0 - off\n" \ "\t1 - rx pause\n" \ "\t2 - tx pause\n" \ "\t3 - tx and rx pause" #define IXL_SYSCTL_HELP_LINK_STATUS \ "\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \ " the response." \ #define IXL_SYSCTL_HELP_FW_LLDP \ "\nFW LLDP engine:\n" \ "\t0 - disable\n" \ "\t1 - enable\n" +#define IXL_SYSCTL_HELP_I2C_METHOD \ +"\nI2C access method that driver will use:\n" \ +"\t0 - best available method\n" \ +"\t1 - bit bang via I2CPARAMS register\n" \ +"\t2 - register read/write via I2CCMD register\n" \ +"\t3 - Use Admin Queue command (best)\n" \ +"Using the Admin Queue is only supported on 710 devices with FW version 1.7 or higher" + extern const char * const ixl_fc_string[6]; MALLOC_DECLARE(M_IXL); /*** Functions / Macros ***/ /* Adjust the level here to 10 or over to print stats messages */ #define I40E_VC_DEBUG(p, level, ...) \ do { \ if (level < 10) \ ixl_dbg(p, IXL_DBG_IOV_VC, ##__VA_ARGS__); \ } while (0) #define i40e_send_vf_nack(pf, vf, op, st) \ ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__) #define IXL_PF_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF) #define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx) #define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx) #define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx) #define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED) /* Debug printing */ #define ixl_dbg(p, m, s, ...) ixl_debug_core(p, m, s, ##__VA_ARGS__) void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...); -/* For stats sysctl naming */ -#define QUEUE_NAME_LEN 32 - /* For netmap(4) compatibility */ #define ixl_disable_intr(vsi) ixl_disable_rings_intr(vsi) /* * PF-only function declarations */ int ixl_setup_interface(device_t, struct ixl_vsi *); void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *); -char * ixl_aq_speed_to_str(enum i40e_aq_link_speed); void ixl_handle_que(void *context, int pending); void ixl_init(void *); void ixl_local_timer(void *); void ixl_register_vlan(void *, struct ifnet *, u16); void ixl_unregister_vlan(void *, struct ifnet *, u16); void ixl_intr(void *); void ixl_msix_que(void *); void ixl_msix_adminq(void *); void ixl_do_adminq(void *, int); int ixl_res_alloc_cmp(const void *, const void *); char * ixl_switch_res_type_string(u8); char * ixl_switch_element_string(struct sbuf *, struct i40e_aqc_switch_config_element_resp *); void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_hw_port_stats *); void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_eth_stats *); void ixl_media_status(struct ifnet *, struct ifmediareq *); int ixl_media_change(struct ifnet *); int ixl_ioctl(struct ifnet *, u_long, caddr_t); void ixl_enable_queue(struct i40e_hw *, int); void ixl_disable_queue(struct i40e_hw *, int); void ixl_enable_intr0(struct i40e_hw *); void ixl_disable_intr0(struct i40e_hw *); void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf); void ixl_stat_update48(struct i40e_hw *, u32, u32, bool, u64 *, u64 *); void ixl_stat_update32(struct i40e_hw *, u32, bool, u64 *, u64 *); void ixl_stop(struct ixl_pf *); -void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name); +void ixl_vsi_add_sysctls(struct ixl_vsi *, const char *, bool); int ixl_get_hw_capabilities(struct ixl_pf *); void ixl_link_up_msg(struct ixl_pf *); void 
ixl_update_link_status(struct ixl_pf *); int ixl_allocate_pci_resources(struct ixl_pf *); int ixl_setup_stations(struct ixl_pf *); int ixl_switch_config(struct ixl_pf *); void ixl_stop_locked(struct ixl_pf *); int ixl_teardown_hw_structs(struct ixl_pf *); int ixl_reset(struct ixl_pf *); void ixl_init_locked(struct ixl_pf *); void ixl_set_rss_key(struct ixl_pf *); void ixl_set_rss_pctypes(struct ixl_pf *); void ixl_set_rss_hlut(struct ixl_pf *); int ixl_setup_adminq_msix(struct ixl_pf *); int ixl_setup_adminq_tq(struct ixl_pf *); int ixl_teardown_adminq_msix(struct ixl_pf *); void ixl_configure_intr0_msix(struct ixl_pf *); void ixl_configure_queue_intr_msix(struct ixl_pf *); void ixl_free_adminq_tq(struct ixl_pf *); int ixl_setup_legacy(struct ixl_pf *); int ixl_init_msix(struct ixl_pf *); void ixl_configure_itr(struct ixl_pf *); void ixl_configure_legacy(struct ixl_pf *); void ixl_free_pci_resources(struct ixl_pf *); void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *); void ixl_config_rss(struct ixl_pf *); int ixl_set_advertised_speeds(struct ixl_pf *, int, bool); void ixl_set_initial_advertised_speeds(struct ixl_pf *); void ixl_print_nvm_version(struct ixl_pf *pf); void ixl_add_device_sysctls(struct ixl_pf *); void ixl_handle_mdd_event(struct ixl_pf *); void ixl_add_hw_stats(struct ixl_pf *); void ixl_update_stats_counters(struct ixl_pf *); void ixl_pf_reset_stats(struct ixl_pf *); void ixl_get_bus_info(struct ixl_pf *pf); int ixl_aq_get_link_status(struct ixl_pf *, struct i40e_aqc_get_link_status *); int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *); +int ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *, struct ifreq *ifr); void ixl_handle_empr_reset(struct ixl_pf *); int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up); int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up); void ixl_set_queue_rx_itr(struct ixl_queue *); void ixl_set_queue_tx_itr(struct ixl_queue *); void ixl_add_filter(struct ixl_vsi *, const u8 *, s16 vlan); void ixl_del_filter(struct ixl_vsi *, const u8 *, s16 vlan); void ixl_reconfigure_filters(struct ixl_vsi *vsi); int ixl_disable_rings(struct ixl_vsi *); int ixl_disable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_disable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16); int ixl_enable_rings(struct ixl_vsi *); int ixl_enable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16); void ixl_update_eth_stats(struct ixl_vsi *); void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); int ixl_initialize_vsi(struct ixl_vsi *); void ixl_add_ifmedia(struct ixl_vsi *, u64); int ixl_setup_queue_msix(struct ixl_vsi *); int ixl_setup_queue_tqs(struct ixl_vsi *); int ixl_teardown_queue_msix(struct ixl_vsi *); void ixl_free_queue_tqs(struct ixl_vsi *); void ixl_enable_intr(struct ixl_vsi *); void ixl_disable_rings_intr(struct ixl_vsi *); void ixl_set_promisc(struct ixl_vsi *); void ixl_add_multi(struct ixl_vsi *); void ixl_del_multi(struct ixl_vsi *); -void ixl_setup_vlan_filters(struct ixl_vsi *); void ixl_init_filters(struct ixl_vsi *); void ixl_add_hw_filters(struct ixl_vsi *, int, int); void ixl_del_hw_filters(struct ixl_vsi *, int); +void ixl_del_default_hw_filters(struct ixl_vsi *); struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *, const u8 *, s16); void ixl_add_mc_filter(struct ixl_vsi *, u8 *); void 
ixl_free_mac_filters(struct ixl_vsi *vsi); void ixl_update_vsi_stats(struct ixl_vsi *); void ixl_vsi_reset_stats(struct ixl_vsi *); int ixl_vsi_setup_queues(struct ixl_vsi *vsi); void ixl_vsi_free_queues(struct ixl_vsi *vsi); +bool ixl_fw_recovery_mode(struct ixl_pf *); /* * I2C Function prototypes */ int ixl_find_i2c_interface(struct ixl_pf *); -s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset, +s32 ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 *data); -s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset, +s32 ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 data); +s32 ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 data); -int ixl_get_fw_lldp_status(struct ixl_pf *pf); int ixl_attach_get_link_status(struct ixl_pf *); #endif /* _IXL_PF_H_ */ Index: releng/11.3/sys/dev/ixl/ixl_pf_i2c.c =================================================================== --- releng/11.3/sys/dev/ixl/ixl_pf_i2c.c (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_pf_i2c.c (revision 349181) @@ -1,605 +1,743 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf.h" #define IXL_I2C_T_RISE 1 #define IXL_I2C_T_FALL 1 #define IXL_I2C_T_SU_DATA 1 #define IXL_I2C_T_SU_STA 5 #define IXL_I2C_T_SU_STO 4 #define IXL_I2C_T_HD_STA 4 #define IXL_I2C_T_LOW 5 #define IXL_I2C_T_HIGH 4 #define IXL_I2C_T_BUF 5 #define IXL_I2C_CLOCK_STRETCHING_TIMEOUT 500 #define IXL_I2C_REG(_hw) \ - I40E_GLGEN_I2CPARAMS(((struct i40e_osdep *)(_hw)->back)->i2c_intfc_num) + I40E_GLGEN_I2CPARAMS(_hw->func_caps.mdio_port_num) - +/* I2C bit-banging functions */ static s32 ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data); static bool ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl); static void ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl); static void ixl_lower_i2c_clk(struct ixl_pf *pf, u32 *i2cctl); static s32 ixl_clock_out_i2c_bit(struct ixl_pf *pf, bool data); static s32 ixl_get_i2c_ack(struct ixl_pf *pf); static s32 ixl_clock_out_i2c_byte(struct ixl_pf *pf, u8 data); static s32 ixl_clock_in_i2c_bit(struct ixl_pf *pf, bool *data); static s32 ixl_clock_in_i2c_byte(struct ixl_pf *pf, u8 *data); static void ixl_i2c_bus_clear(struct ixl_pf *pf); static void ixl_i2c_start(struct ixl_pf *pf); static void ixl_i2c_stop(struct ixl_pf *pf); +static s32 ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum); + /** * ixl_i2c_bus_clear - Clears the I2C bus * @hw: pointer to hardware structure * * Clears the I2C bus by sending nine clock pulses. * Used when data line is stuck low. **/ static void ixl_i2c_bus_clear(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); u32 i; DEBUGFUNC("ixl_i2c_bus_clear"); ixl_i2c_start(pf); ixl_set_i2c_data(pf, &i2cctl, 1); for (i = 0; i < 9; i++) { ixl_raise_i2c_clk(pf, &i2cctl); /* Min high period of clock is 4us */ i40e_usec_delay(IXL_I2C_T_HIGH); ixl_lower_i2c_clk(pf, &i2cctl); /* Min low period of clock is 4.7us*/ i40e_usec_delay(IXL_I2C_T_LOW); } ixl_i2c_start(pf); /* Put the i2c bus back to default state */ ixl_i2c_stop(pf); } /** * ixl_i2c_stop - Sets I2C stop condition * @hw: pointer to hardware structure * * Sets I2C stop condition (Low -> High on SDA while SCL is High) **/ static void ixl_i2c_stop(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); DEBUGFUNC("ixl_i2c_stop"); /* Stop condition must begin with data low and clock high */ ixl_set_i2c_data(pf, &i2cctl, 0); ixl_raise_i2c_clk(pf, &i2cctl); /* Setup time for stop condition (4us) */ i40e_usec_delay(IXL_I2C_T_SU_STO); ixl_set_i2c_data(pf, &i2cctl, 1); /* bus free time between stop and start (4.7us)*/ i40e_usec_delay(IXL_I2C_T_BUF); } /** * ixl_clock_in_i2c_byte - Clocks in one byte via I2C * @hw: pointer to hardware structure * @data: data byte to clock in * * Clocks in one byte data via I2C data/clock **/ static s32 ixl_clock_in_i2c_byte(struct ixl_pf *pf, u8 *data) { s32 i; bool bit = 0; DEBUGFUNC("ixl_clock_in_i2c_byte"); for (i = 7; i >= 0; i--) { ixl_clock_in_i2c_bit(pf, &bit); *data |= bit << i; } return I40E_SUCCESS; } /** * ixl_clock_in_i2c_bit - Clocks in one bit via I2C data/clock * @hw: pointer to hardware structure * @data: read data value * * Clocks in one bit via I2C data/clock **/ static s32 ixl_clock_in_i2c_bit(struct ixl_pf *pf, bool *data) { struct i40e_hw *hw = &pf->hw; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); DEBUGFUNC("ixl_clock_in_i2c_bit"); ixl_raise_i2c_clk(pf, &i2cctl); /* Minimum high period of clock is 4us */ i40e_usec_delay(IXL_I2C_T_HIGH); i2cctl = rd32(hw, 
IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); i2cctl = rd32(hw, IXL_I2C_REG(hw)); *data = ixl_get_i2c_data(pf, &i2cctl); ixl_lower_i2c_clk(pf, &i2cctl); /* Minimum low period of clock is 4.7 us */ i40e_usec_delay(IXL_I2C_T_LOW); return I40E_SUCCESS; } /** * ixl_get_i2c_ack - Polls for I2C ACK * @hw: pointer to hardware structure * * Clocks in/out one bit via I2C data/clock **/ static s32 ixl_get_i2c_ack(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; s32 status = I40E_SUCCESS; u32 i = 0; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); u32 timeout = 10; bool ack = 1; ixl_raise_i2c_clk(pf, &i2cctl); /* Minimum high period of clock is 4us */ i40e_usec_delay(IXL_I2C_T_HIGH); i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); /* Poll for ACK. Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { i2cctl = rd32(hw, IXL_I2C_REG(hw)); ack = ixl_get_i2c_data(pf, &i2cctl); i40e_usec_delay(1); if (!ack) break; } if (ack) { ixl_dbg(pf, IXL_DBG_I2C, "I2C ack was not received.\n"); status = I40E_ERR_PHY; } ixl_lower_i2c_clk(pf, &i2cctl); /* Minimum low period of clock is 4.7 us */ i40e_usec_delay(IXL_I2C_T_LOW); return status; } /** * ixl_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock * @hw: pointer to hardware structure * @data: data value to write * * Clocks out one bit via I2C data/clock **/ static s32 ixl_clock_out_i2c_bit(struct ixl_pf *pf, bool data) { struct i40e_hw *hw = &pf->hw; s32 status; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); status = ixl_set_i2c_data(pf, &i2cctl, data); if (status == I40E_SUCCESS) { ixl_raise_i2c_clk(pf, &i2cctl); /* Minimum high period of clock is 4us */ i40e_usec_delay(IXL_I2C_T_HIGH); ixl_lower_i2c_clk(pf, &i2cctl); /* Minimum low period of clock is 4.7 us. * This also takes care of the data hold time. 
*/ i40e_usec_delay(IXL_I2C_T_LOW); } else { status = I40E_ERR_PHY; ixl_dbg(pf, IXL_DBG_I2C, "I2C data was not set to %#x\n", data); } return status; } /** * ixl_clock_out_i2c_byte - Clocks out one byte via I2C * @hw: pointer to hardware structure * @data: data byte clocked out * * Clocks out one byte data via I2C data/clock **/ static s32 ixl_clock_out_i2c_byte(struct ixl_pf *pf, u8 data) { struct i40e_hw *hw = &pf->hw; s32 status = I40E_SUCCESS; s32 i; u32 i2cctl; bool bit; DEBUGFUNC("ixl_clock_out_i2c_byte"); for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; status = ixl_clock_out_i2c_bit(pf, bit); if (status != I40E_SUCCESS) break; } /* Release SDA line (set high) */ i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK; i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK); wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); return status; } /** * ixl_lower_i2c_clk - Lowers the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Lowers the I2C clock line '1'->'0' **/ static void ixl_lower_i2c_clk(struct ixl_pf *pf, u32 *i2cctl) { struct i40e_hw *hw = &pf->hw; *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_MASK); *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK); wr32(hw, IXL_I2C_REG(hw), *i2cctl); ixl_flush(hw); /* SCL fall time (300ns) */ i40e_usec_delay(IXL_I2C_T_FALL); } /** * ixl_raise_i2c_clk - Raises the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Raises the I2C clock line '0'->'1' **/ static void ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl) { struct i40e_hw *hw = &pf->hw; u32 i = 0; u32 timeout = IXL_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; for (i = 0; i < timeout; i++) { *i2cctl |= I40E_GLGEN_I2CPARAMS_CLK_MASK; *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK); wr32(hw, IXL_I2C_REG(hw), *i2cctl); ixl_flush(hw); /* SCL rise time (1000ns) */ i40e_usec_delay(IXL_I2C_T_RISE); i2cctl_r = rd32(hw, IXL_I2C_REG(hw)); if (i2cctl_r & I40E_GLGEN_I2CPARAMS_CLK_IN_MASK) break; } } /** * ixl_get_i2c_data - Reads the I2C SDA data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Returns the I2C data bit value **/ static bool ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl) { bool data; if (*i2cctl & I40E_GLGEN_I2CPARAMS_DATA_IN_MASK) data = 1; else data = 0; return data; } /** * ixl_set_i2c_data - Sets the I2C data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * @data: I2C data value (0 or 1) to set * * Sets the I2C data bit **/ static s32 ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data) { struct i40e_hw *hw = &pf->hw; s32 status = I40E_SUCCESS; DEBUGFUNC("ixl_set_i2c_data"); if (data) *i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK; else *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK); *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK); wr32(hw, IXL_I2C_REG(hw), *i2cctl); ixl_flush(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ i40e_usec_delay(IXL_I2C_T_RISE + IXL_I2C_T_FALL + IXL_I2C_T_SU_DATA); /* Verify data was set correctly */ *i2cctl = rd32(hw, IXL_I2C_REG(hw)); if (data != ixl_get_i2c_data(pf, i2cctl)) { status = I40E_ERR_PHY; ixl_dbg(pf, IXL_DBG_I2C, "Error - I2C data was not set to %X.\n", data); } return status; } /** * ixl_i2c_start - Sets I2C start condition * Sets I2C start condition (High -> Low on SDA while SCL is High) **/ static void ixl_i2c_start(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); 
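	/*
	 * The bit-bang helpers below take &i2cctl, update this cached copy
	 * of I2CPARAMS and write it back to the hardware themselves.
	 */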
DEBUGFUNC("ixl_i2c_start"); /* Start condition must begin with data and clock high */ ixl_set_i2c_data(pf, &i2cctl, 1); ixl_raise_i2c_clk(pf, &i2cctl); /* Setup time for start condition (4.7us) */ i40e_usec_delay(IXL_I2C_T_SU_STA); ixl_set_i2c_data(pf, &i2cctl, 0); /* Hold time for start condition (4us) */ i40e_usec_delay(IXL_I2C_T_HD_STA); ixl_lower_i2c_clk(pf, &i2cctl); /* Minimum low period of clock is 4.7 us */ i40e_usec_delay(IXL_I2C_T_LOW); } /** - * ixl_read_i2c_byte - Reads 8 bit word over I2C + * ixl_read_i2c_byte_bb - Reads 8 bit word over I2C **/ s32 -ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset, +ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 *data) { struct i40e_hw *hw = &pf->hw; u32 max_retry = 10; u32 retry = 0; bool nack = 1; s32 status; *data = 0; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); do { ixl_i2c_start(pf); /* Device Address and write indication */ status = ixl_clock_out_i2c_byte(pf, dev_addr); if (status != I40E_SUCCESS) { ixl_dbg(pf, IXL_DBG_I2C, "dev_addr clock out error\n"); goto fail; } status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) { ixl_dbg(pf, IXL_DBG_I2C, "dev_addr i2c ack error\n"); goto fail; } status = ixl_clock_out_i2c_byte(pf, byte_offset); if (status != I40E_SUCCESS) { ixl_dbg(pf, IXL_DBG_I2C, "byte_offset clock out error\n"); goto fail; } status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) { ixl_dbg(pf, IXL_DBG_I2C, "byte_offset i2c ack error\n"); goto fail; } ixl_i2c_start(pf); /* Device Address and read indication */ status = ixl_clock_out_i2c_byte(pf, (dev_addr | 0x1)); if (status != I40E_SUCCESS) goto fail; status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) goto fail; status = ixl_clock_in_i2c_byte(pf, data); if (status != I40E_SUCCESS) goto fail; status = ixl_clock_out_i2c_bit(pf, nack); if (status != I40E_SUCCESS) goto fail; ixl_i2c_stop(pf); status = I40E_SUCCESS; goto done; fail: ixl_i2c_bus_clear(pf); i40e_msec_delay(100); retry++; if (retry < max_retry) - ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying.\n"); + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying\n"); else - ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error.\n"); + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error\n"); } while (retry < max_retry); done: i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl &= ~I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); return status; } /** - * ixl_write_i2c_byte - Writes 8 bit word over I2C + * ixl_write_i2c_byte_bb - Writes 8 bit word over I2C **/ s32 -ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset, +ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 data) { struct i40e_hw *hw = &pf->hw; s32 status = I40E_SUCCESS; u32 max_retry = 1; u32 retry = 0; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); do { ixl_i2c_start(pf); status = ixl_clock_out_i2c_byte(pf, dev_addr); if (status != I40E_SUCCESS) goto fail; status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) goto fail; status = ixl_clock_out_i2c_byte(pf, byte_offset); if (status != I40E_SUCCESS) goto fail; status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) goto fail; status = ixl_clock_out_i2c_byte(pf, data); if (status != I40E_SUCCESS) goto fail; status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) goto fail; ixl_i2c_stop(pf); goto write_byte_out; fail: ixl_i2c_bus_clear(pf); 
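	/*
	 * ixl_i2c_bus_clear() above clocks out nine pulses for a device left
	 * driving SDA low; back off 100 ms before retrying the transfer.
	 */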
i40e_msec_delay(100);
		retry++;
		if (retry < max_retry)
-			ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying.\n");
+			ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying\n");
		else
-			ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error.\n");
+			ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error\n");
	} while (retry < max_retry);

write_byte_out:
	i2cctl = rd32(hw, IXL_I2C_REG(hw));
	i2cctl &= ~I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
	wr32(hw, IXL_I2C_REG(hw), i2cctl);
	ixl_flush(hw);

	return status;
}

+/**
+ * ixl_read_i2c_byte_reg - Reads an 8-bit value over I2C via the I2CCMD register
+ **/
+s32
+ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
+    u8 dev_addr, u8 *data)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg = 0;
+	s32 status;
+	*data = 0;
+
+	reg |= (byte_offset << I40E_GLGEN_I2CCMD_REGADD_SHIFT);
+	reg |= (((dev_addr >> 1) & 0x7) << I40E_GLGEN_I2CCMD_PHYADD_SHIFT);
+	reg |= I40E_GLGEN_I2CCMD_OP_MASK;
+	wr32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num), reg);
+
+	status = ixl_wait_for_i2c_completion(hw, hw->func_caps.mdio_port_num);
+
+	/* Get data from I2C register */
+	reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num));
+
+	/* Retrieve data read from EEPROM */
+	*data = (u8)(reg & 0xff);
+
+	if (status)
+		ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error\n");
+	return status;
+}
+
+/**
+ * ixl_write_i2c_byte_reg - Writes an 8-bit value over I2C via the I2CCMD register
+ **/
+s32
+ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
+    u8 dev_addr, u8 data)
+{
+	struct i40e_hw *hw = &pf->hw;
+	s32 status = I40E_SUCCESS;
+	u32 reg = 0;
+	u8 upperbyte = 0;
+	u16 datai2c = 0;
+
+	status = ixl_read_i2c_byte_reg(pf, byte_offset + 1, dev_addr, &upperbyte);
+	datai2c = ((u16)upperbyte << 8) | (u16)data;
+	reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num));
+
+	/* Form write command */
+	reg &= ~I40E_GLGEN_I2CCMD_PHYADD_MASK;
+	reg |= (((dev_addr >> 1) & 0x7) << I40E_GLGEN_I2CCMD_PHYADD_SHIFT);
+	reg &= ~I40E_GLGEN_I2CCMD_REGADD_MASK;
+	reg |= (byte_offset << I40E_GLGEN_I2CCMD_REGADD_SHIFT);
+	reg &= ~I40E_GLGEN_I2CCMD_DATA_MASK;
+	reg |= (datai2c << I40E_GLGEN_I2CCMD_DATA_SHIFT);
+	reg &= ~I40E_GLGEN_I2CCMD_OP_MASK;
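+
+	/*
+	 * I2CCMD moves 16 bits of data per transaction; the byte at
+	 * byte_offset + 1 was read back above and merged into datai2c so
+	 * the neighbouring byte is rewritten unchanged.
+	 */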
+	/* Write command to registers controlling I2C - data and address. */
+	wr32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num), reg);
+
+	status = ixl_wait_for_i2c_completion(hw, hw->func_caps.mdio_port_num);
+
+	if (status)
+		ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error\n");
+	return status;
+}
+
+/**
+ * ixl_wait_for_i2c_completion - Polls for the I2CCMD ready bit, or times out
+ **/
+static s32
+ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum)
+{
+	s32 status = 0;
+	u32 timeout = 100;
+	u32 reg;
+	do {
+		reg = rd32(hw, I40E_GLGEN_I2CCMD(portnum));
+		if ((reg & I40E_GLGEN_I2CCMD_R_MASK) != 0)
+			break;
+		i40e_usec_delay(10);
+	} while (--timeout > 0);
+
+	if (timeout == 0)
+		return I40E_ERR_TIMEOUT;
+	else
+		return status;
+}
+
+/**
+ * ixl_read_i2c_byte_aq - Reads an 8-bit value over I2C using the Admin Queue
+ **/
+s32
+ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
+    u8 dev_addr, u8 *data)
+{
+	struct i40e_hw *hw = &pf->hw;
+	s32 status = I40E_SUCCESS;
+	u32 reg;
+
+	status = i40e_aq_get_phy_register(hw,
+	    I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+	    dev_addr, false,
+	    byte_offset,
+	    &reg, NULL);
+
+	if (status)
+		ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read status %s, error %s\n",
+		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+	else
+		*data = (u8)reg;
+
+	return status;
+}
+
+/**
+ * ixl_write_i2c_byte_aq - Writes an 8-bit value over I2C using the Admin Queue
+ **/
+s32
+ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
+    u8 dev_addr, u8 data)
+{
+	struct i40e_hw *hw = &pf->hw;
+	s32 status = I40E_SUCCESS;
+
+	status = i40e_aq_set_phy_register(hw,
+	    I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+	    dev_addr, false,
+	    byte_offset,
+	    data, NULL);
+
+	if (status)
+		ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write status %s, error %s\n",
+		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+
+	return status;
+}
Index: releng/11.3/sys/dev/ixl/ixl_pf_iov.c
===================================================================
--- releng/11.3/sys/dev/ixl/ixl_pf_iov.c	(revision 349180)
+++ releng/11.3/sys/dev/ixl/ixl_pf_iov.c	(revision 349181)
@@ -1,1900 +1,1903 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf_iov.h" /* Private functions */ static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val); static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg); static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg); static bool ixl_zero_mac(const uint8_t *addr); static bool ixl_bcast_mac(const uint8_t *addr); static int ixl_vc_opcode_level(uint16_t opcode); static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr); static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf); static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi); static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf); static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len); static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op); static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line); static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info); static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info); static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, enum i40e_queue_type *last_type, uint16_t *last_queue); static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector); static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf 
*vf); static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues); static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); void ixl_initialize_sriov(struct ixl_pf *pf) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; nvlist_t *pf_schema, *vf_schema; int iov_error; /* SR-IOV is only supported when MSI-X is in use. */ if (pf->msix <= 1) return; pf_schema = pci_iov_schema_alloc_node(); vf_schema = pci_iov_schema_alloc_node(); pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", IOV_SCHEMA_HASDEFAULT, TRUE); pci_iov_schema_add_bool(vf_schema, "allow-set-mac", IOV_SCHEMA_HASDEFAULT, FALSE); pci_iov_schema_add_bool(vf_schema, "allow-promisc", IOV_SCHEMA_HASDEFAULT, FALSE); pci_iov_schema_add_uint16(vf_schema, "num-queues", IOV_SCHEMA_HASDEFAULT, max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES); iov_error = pci_iov_attach(dev, pf_schema, vf_schema); if (iov_error != 0) { device_printf(dev, "Failed to initialize SR-IOV (error=%d)\n", iov_error); } else device_printf(dev, "SR-IOV ready\n"); pf->vc_debug_lvl = 1; } /* * Allocate the VSI for a VF. */ static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf) { device_t dev; struct i40e_hw *hw; struct ixl_vsi *vsi; struct i40e_vsi_context vsi_ctx; int i; enum i40e_status_code code; hw = &pf->hw; vsi = &pf->vsi; dev = pf->dev; vsi_ctx.pf_num = hw->pf_id; vsi_ctx.uplink_seid = pf->veb_seid; vsi_ctx.connection_type = IXL_VSI_DATA_PORT; vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num; vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF; bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID); vsi_ctx.info.switch_id = htole16(0); vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID); vsi_ctx.info.sec_flags = 0; if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF) vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_NOTHING; vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); /* ERJ: Only scattered allocation is supported for VFs right now */ for (i = 0; i < vf->qtag.num_active; i++) vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i]; for (; i < nitems(vsi_ctx.info.queue_mapping); i++) vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK); vsi_ctx.info.tc_mapping[0] = htole16( (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL); if (code != I40E_SUCCESS) return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); vf->vsi.seid = vsi_ctx.seid; vf->vsi.vsi_num = vsi_ctx.vsi_number; // vf->vsi.first_queue = vf->qtag.qidx[0]; vf->vsi.num_queues = vf->qtag.num_active; code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); if (code != I40E_SUCCESS) return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); code = 
i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); if (code != I40E_SUCCESS) { device_printf(dev, "Failed to disable BW limit: %d\n", ixl_adminq_err_to_errno(hw->aq.asq_last_status)); return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); } memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info)); return (0); } static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; int error; hw = &pf->hw; + vf->vsi.flags |= IXL_FLAGS_IS_VF; error = ixl_vf_alloc_vsi(pf, vf); if (error != 0) return (error); - vf->vsi.hw_filters_add = 0; - vf->vsi.hw_filters_del = 0; - ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY); - ixl_reconfigure_filters(&vf->vsi); + vf->vsi.dev = pf->dev; + vf->vsi.num_hw_filters = 0; + ixl_init_filters(&vf->vsi); + return (0); } static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val) { uint32_t qtable; int index, shift; /* * Two queues are mapped in a single register, so we have to do some * gymnastics to convert the queue number into a register index and * shift. */ index = qnum / 2; shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT; qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num)); qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift); qtable |= val << shift; i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable); } static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t qtable; int i; hw = &pf->hw; /* * Contiguous mappings aren't actually supported by the hardware, * so we have to use non-contiguous mappings. */ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num), I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); /* Enable LAN traffic on this VF */ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num), I40E_VPLAN_MAPENA_TXRX_ENA_MASK); /* Program index of each VF queue into PF queue space * (This is only needed if QTABLE is enabled) */ for (i = 0; i < vf->vsi.num_queues; i++) { qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) << I40E_VPLAN_QTABLE_QINDEX_SHIFT; wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable); } for (; i < IXL_MAX_VSI_QUEUES; i++) wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), I40E_VPLAN_QTABLE_QINDEX_MASK); /* Map queues allocated to VF to its VSI; * This mapping matches the VF-wide mapping since the VF * is only given a single VSI */ for (i = 0; i < vf->vsi.num_queues; i++) ixl_vf_map_vsi_queue(hw, vf, i, ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i)); /* Set rest of VSI queues as unused. */ for (; i < IXL_MAX_VSI_QUEUES; i++) ixl_vf_map_vsi_queue(hw, vf, i, I40E_VSILAN_QTABLE_QINDEX_0_MASK); ixl_flush(hw); } static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi) { struct i40e_hw *hw; hw = &pf->hw; if (vsi->seid == 0) return; i40e_aq_delete_element(hw, vsi->seid, NULL); } static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg) { wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); ixl_flush(hw); } static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg) { wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); ixl_flush(hw); } static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfint_reg, vpint_reg; int i; hw = &pf->hw; ixl_vf_vsi_release(pf, &vf->vsi); /* Index 0 has a special register. 
*/ ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num)); for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num); ixl_vf_disable_queue_intr(hw, vfint_reg); } /* Index 0 has a special register. */ ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num)); for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num); ixl_vf_unregister_intr(hw, vpint_reg); } vf->vsi.num_queues = 0; } static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; int i; uint16_t global_vf_num; uint32_t ciad; hw = &pf->hw; global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS | (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { ciad = rd32(hw, I40E_PF_PCI_CIAD); if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0) return (0); DELAY(1); } return (ETIMEDOUT); } static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfrtrig; hw = &pf->hw; vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); ixl_flush(hw); ixl_reinit_vf(pf, vf); } static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfrstat, vfrtrig; int i, error; hw = &pf->hw; error = ixl_flush_pcie(pf, vf); if (error != 0) device_printf(pf->dev, "Timed out waiting for PCIe activity to stop on VF-%d\n", vf->vf_num); for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { DELAY(10); vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) break; } if (i == IXL_VF_RESET_TIMEOUT) device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED); vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); if (vf->vsi.seid != 0) ixl_disable_rings(&vf->vsi); ixl_vf_release_resources(pf, vf); ixl_vf_setup_vsi(pf, vf); ixl_vf_map_queues(pf, vf); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE); ixl_flush(hw); } static int ixl_vc_opcode_level(uint16_t opcode) { switch (opcode) { case VIRTCHNL_OP_GET_STATS: return (10); default: return (5); } } static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len) { struct i40e_hw *hw; int global_vf_id; hw = &pf->hw; global_vf_id = hw->func_caps.vf_base_id + vf->vf_num; I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op), "Sending msg (op=%s[%d], status=%d) to VF-%d\n", ixl_vc_opcode_str(op), op, status, vf->vf_num); i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL); } static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op) { ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0); } static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line) { I40E_VC_DEBUG(pf, 1, "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n", ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status), status, vf->vf_num, file, line); ixl_send_vf_msg(pf, vf, op, status, NULL, 0); } static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_version_info reply; if (msg_size != sizeof(struct virtchnl_version_info)) { 
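		/* Malformed VERSION request; NACK it without reading the payload. */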
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION, I40E_ERR_PARAM); return; } vf->version = ((struct virtchnl_version_info *)msg)->minor; reply.major = VIRTCHNL_VERSION_MAJOR; reply.minor = VIRTCHNL_VERSION_MINOR; ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply, sizeof(reply)); } static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { if (msg_size != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF, I40E_ERR_PARAM); return; } ixl_reset_vf(pf, vf); /* No response to a reset message. */ } static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vf_resource reply; if ((vf->version == 0 && msg_size != 0) || (vf->version == 1 && msg_size != 4)) { device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size," " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR, vf->version); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES, I40E_ERR_PARAM); return; } bzero(&reply, sizeof(reply)); if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN; else /* Force VF RSS setup by PF in 1.1+ VFs */ reply.vf_cap_flags = *(u32 *)msg & ( VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_VLAN); reply.num_vsis = 1; reply.num_queue_pairs = vf->vsi.num_queues; reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf; reply.rss_key_size = 52; reply.rss_lut_size = 64; reply.vsi_res[0].vsi_id = vf->vsi.vsi_num; reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues; memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN); ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES, I40E_SUCCESS, &reply, sizeof(reply)); } static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info) { struct i40e_hw *hw; struct i40e_hmc_obj_txq txq; uint16_t global_queue_num, global_vf_num; enum i40e_status_code status; uint32_t qtx_ctl; hw = &pf->hw; global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id); global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; bzero(&txq, sizeof(txq)); DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n", vf->vf_num, global_queue_num, info->queue_id, global_vf_num); status = i40e_clear_lan_tx_queue_context(hw, global_queue_num); if (status != I40E_SUCCESS) return (EINVAL); txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS; txq.head_wb_ena = info->headwb_enabled; txq.head_wb_addr = info->dma_headwb_addr; txq.qlen = info->ring_len; txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]); txq.rdylist_act = 0; status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq); if (status != I40E_SUCCESS) return (EINVAL); qtx_ctl = I40E_QTX_CTL_VF_QUEUE | (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) | (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT); wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl); ixl_flush(hw); ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true); return (0); } static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info) { struct i40e_hw *hw; struct i40e_hmc_obj_rxq rxq; uint16_t global_queue_num; enum i40e_status_code status; hw = &pf->hw; global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id); bzero(&rxq, sizeof(rxq)); DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n", vf->vf_num, global_queue_num, info->queue_id); if 
(info->databuffer_size > IXL_VF_MAX_BUFFER) return (EINVAL); if (info->max_pkt_size > IXL_VF_MAX_FRAME || info->max_pkt_size < ETHER_MIN_LEN) return (EINVAL); if (info->splithdr_enabled) { if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER) return (EINVAL); rxq.hsplit_0 = info->rx_split_pos & (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP); rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; rxq.dtype = 2; } status = i40e_clear_lan_rx_queue_context(hw, global_queue_num); if (status != I40E_SUCCESS) return (EINVAL); rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS; rxq.qlen = info->ring_len; rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; rxq.dsize = 1; rxq.crcstrip = 1; rxq.l2tsel = 1; rxq.rxmax = info->max_pkt_size; rxq.tphrdesc_ena = 1; rxq.tphwdesc_ena = 1; rxq.tphdata_ena = 1; rxq.tphhead_ena = 1; rxq.lrxqthresh = 2; rxq.prefena = 1; status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq); if (status != I40E_SUCCESS) return (EINVAL); ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false); return (0); } static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vsi_queue_config_info *info; struct virtchnl_queue_pair_info *pair; uint16_t expected_msg_size; int i; if (msg_size < sizeof(*info)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } info = msg; if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) { device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n", vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair); if (msg_size != expected_msg_size) { device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n", vf->vf_num, msg_size, expected_msg_size); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (info->vsi_id != vf->vsi.vsi_num) { device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", vf->vf_num, info->vsi_id, vf->vsi.vsi_num); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } for (i = 0; i < info->num_queue_pairs; i++) { pair = &info->qpair[i]; if (pair->txq.vsi_id != vf->vsi.vsi_num || pair->rxq.vsi_id != vf->vsi.vsi_num || pair->txq.queue_id != pair->rxq.queue_id || pair->txq.queue_id >= vf->vsi.num_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES); } static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, enum i40e_queue_type *last_type, uint16_t *last_queue) { uint32_t offset, qctl; uint16_t itr_indx; if (cur_type == I40E_QUEUE_TYPE_RX) { offset = I40E_QINT_RQCTL(cur_queue); itr_indx = vector->rxitr_idx; } else { offset = I40E_QINT_TQCTL(cur_queue); itr_indx = vector->txitr_idx; } qctl = htole32((vector->vector_id << 
I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | I40E_QINT_RQCTL_CAUSE_ENA_MASK | (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT)); wr32(&pf->hw, offset, qctl); *last_type = cur_type; *last_queue = cur_queue; } static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector) { struct i40e_hw *hw; u_int qindex; enum i40e_queue_type type, last_type; uint32_t lnklst_reg; uint16_t rxq_map, txq_map, cur_queue, last_queue; hw = &pf->hw; rxq_map = vector->rxq_map; txq_map = vector->txq_map; last_queue = IXL_END_OF_INTR_LNKLST; last_type = I40E_QUEUE_TYPE_RX; /* * The datasheet says to optimize performance, RX queues and TX queues * should be interleaved in the interrupt linked list, so we process * both at once here. */ while ((rxq_map != 0) || (txq_map != 0)) { if (txq_map != 0) { qindex = ffs(txq_map) - 1; type = I40E_QUEUE_TYPE_TX; cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex); ixl_vf_set_qctl(pf, vector, type, cur_queue, &last_type, &last_queue); txq_map &= ~(1 << qindex); } if (rxq_map != 0) { qindex = ffs(rxq_map) - 1; type = I40E_QUEUE_TYPE_RX; cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex); ixl_vf_set_qctl(pf, vector, type, cur_queue, &last_type, &last_queue); rxq_map &= ~(1 << qindex); } } if (vector->vector_id == 0) lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num); else lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id, vf->vf_num); wr32(hw, lnklst_reg, (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) | (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); ixl_flush(hw); } static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_irq_map_info *map; struct virtchnl_vector_map *vector; struct i40e_hw *hw; int i, largest_txq, largest_rxq; hw = &pf->hw; if (msg_size < sizeof(*map)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } map = msg; if (map->num_vectors == 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } for (i = 0; i < map->num_vectors; i++) { vector = &map->vecmap[i]; if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) || vector->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } if (vector->rxq_map != 0) { largest_rxq = fls(vector->rxq_map) - 1; if (largest_rxq >= vf->vsi.num_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } } if (vector->txq_map != 0) { largest_txq = fls(vector->txq_map) - 1; if (largest_txq >= vf->vsi.num_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } } if (vector->rxitr_idx > IXL_MAX_ITR_IDX || vector->txitr_idx > IXL_MAX_ITR_IDX) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } ixl_vf_config_vector(pf, vf, vector); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP); } static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_queue_select *select; int error = 0; if (msg_size != sizeof(*select)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_PARAM); return; } select = msg; if (select->vsi_id != vf->vsi.vsi_num || select->rx_queues == 0 || select->tx_queues == 
0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_PARAM); return; } /* Enable TX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->tx_queues) { /* Warn if queue is out of VF allocation range */ if (i >= vf->vsi.num_queues) { device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) continue; /* Warn if this queue is already marked as enabled */ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n", vf->vf_num, i); error = ixl_enable_tx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true); } } /* Enable RX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->rx_queues) { /* Warn if queue is out of VF allocation range */ if (i >= vf->vsi.num_queues) { device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) continue; /* Warn if this queue is already marked as enabled */ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n", vf->vf_num, i); error = ixl_enable_rx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false); } } if (error) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_TIMEOUT); return; } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES); } static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_queue_select *select; int error = 0; if (msg_size != sizeof(*select)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_PARAM); return; } select = msg; if (select->vsi_id != vf->vsi.vsi_num || select->rx_queues == 0 || select->tx_queues == 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_PARAM); return; } /* Disable TX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->tx_queues) { /* Warn if queue is out of VF allocation range */ if (i >= vf->vsi.num_queues) { device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) continue; /* Warn if this queue is already marked as disabled */ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) { device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n", vf->vf_num, i); continue; } error = ixl_disable_tx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true); } } /* Disable RX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->rx_queues) { /* Warn if queue is out of VF allocation range */ if (i >= vf->vsi.num_queues) { device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) continue; /* Warn if this queue is already marked as disabled */ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) { device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n", vf->vf_num, i); continue; } error = 
ixl_disable_rx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false); } } if (error) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_TIMEOUT); return; } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES); } static bool ixl_zero_mac(const uint8_t *addr) { uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; return (cmp_etheraddr(addr, zero)); } static bool ixl_bcast_mac(const uint8_t *addr) { return (cmp_etheraddr(addr, ixl_bcast_addr)); } static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr) { if (ixl_zero_mac(addr) || ixl_bcast_mac(addr)) return (EINVAL); /* * If the VF is not allowed to change its MAC address, don't let it * set a MAC filter for an address that is not a multicast address and * is not its assigned MAC. */ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) && !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac))) return (EPERM); return (0); } static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_ether_addr_list *addr_list; struct virtchnl_ether_addr *addr; struct ixl_vsi *vsi; int i; size_t expected_size; vsi = &vf->vsi; if (msg_size < sizeof(*addr_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } addr_list = msg; expected_size = sizeof(*addr_list) + addr_list->num_elements * sizeof(*addr); if (addr_list->num_elements == 0 || addr_list->vsi_id != vsi->vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } for (i = 0; i < addr_list->num_elements; i++) { if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR); } static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_ether_addr_list *addr_list; struct virtchnl_ether_addr *addr; size_t expected_size; int i; if (msg_size < sizeof(*addr_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM); return; } addr_list = msg; expected_size = sizeof(*addr_list) + addr_list->num_elements * sizeof(*addr); if (addr_list->num_elements == 0 || addr_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM); return; } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM); return; } } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR); } static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_vsi_context vsi_ctx; vsi_ctx.seid = vf->vsi.seid; bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL)); } static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vlan_filter_list *filter_list; enum i40e_status_code code; 
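/*
 * The size check below relies on the virtchnl wire format for VLAN
 * filter messages: a fixed virtchnl_vlan_filter_list header followed
 * immediately by num_elements 16-bit VLAN IDs, so a well-formed
 * message carrying N filters is exactly
 * sizeof(*filter_list) + N * sizeof(uint16_t) bytes long.
 */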
size_t expected_size; int i; if (msg_size < sizeof(*filter_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } filter_list = msg; expected_size = sizeof(*filter_list) + filter_list->num_elements * sizeof(uint16_t); if (filter_list->num_elements == 0 || filter_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) { if (filter_list->vlan_id[i] > EVL_VLID_MASK) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } } code = ixl_vf_enable_vlan_strip(pf, vf); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN); } static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vlan_filter_list *filter_list; int i; size_t expected_size; if (msg_size < sizeof(*filter_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } filter_list = msg; expected_size = sizeof(*filter_list) + filter_list->num_elements * sizeof(uint16_t); if (filter_list->num_elements == 0 || filter_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) { if (filter_list->vlan_id[i] > EVL_VLID_MASK) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } } if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN); } static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_promisc_info *info; enum i40e_status_code code; if (msg_size != sizeof(*info)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } info = msg; if (info->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id, info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); return; } code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id, info->flags & FLAG_VF_MULTICAST_PROMISC, NULL); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); return; } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE); } static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_queue_select *queue; if (msg_size != sizeof(*queue)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS, I40E_ERR_PARAM); return; } queue = msg; if (queue->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS, I40E_ERR_PARAM); return; } ixl_update_eth_stats(&vf->vsi); 
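/*
 * The statistics were refreshed from the hardware just above; the reply
 * simply carries the VSI's struct i40e_eth_stats block back to the VF
 * as the message payload.
 */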
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS, I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats)); } static void ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_hw *hw; struct virtchnl_rss_key *key; struct i40e_aqc_get_set_rss_key_data key_data; enum i40e_status_code status; hw = &pf->hw; if (msg_size < sizeof(*key)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, I40E_ERR_PARAM); return; } key = msg; if (key->key_len > 52) { device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n", vf->vf_num, key->key_len, 52); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, I40E_ERR_PARAM); return; } if (key->vsi_id != vf->vsi.vsi_num) { device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", vf->vf_num, key->vsi_id, vf->vsi.vsi_num); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, I40E_ERR_PARAM); return; } /* Fill out hash using MAC-dependent method */ if (hw->mac.type == I40E_MAC_X722) { bzero(&key_data, sizeof(key_data)); if (key->key_len <= 40) bcopy(key->key, key_data.standard_rss_key, key->key_len); else { bcopy(key->key, key_data.standard_rss_key, 40); bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40); } status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data); if (status) { device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, I40E_ERR_ADMIN_QUEUE_ERROR); return; } } else { for (int i = 0; i < (key->key_len / 4); i++) i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]); } DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!", vf->vf_num, key->key[0]); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY); } static void ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_hw *hw; struct virtchnl_rss_lut *lut; enum i40e_status_code status; hw = &pf->hw; if (msg_size < sizeof(*lut)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, I40E_ERR_PARAM); return; } lut = msg; if (lut->lut_entries > 64) { device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n", vf->vf_num, lut->lut_entries, 64); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, I40E_ERR_PARAM); return; } if (lut->vsi_id != vf->vsi.vsi_num) { device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", vf->vf_num, lut->vsi_id, vf->vsi.vsi_num); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, I40E_ERR_PARAM); return; } /* Fill out LUT using MAC-dependent method */ if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries); if (status) { device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, I40E_ERR_ADMIN_QUEUE_ERROR); return; } } else { for (int i = 0; i < (lut->lut_entries / 4); i++) i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]); } DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!", vf->vf_num, lut->lut[0], lut->lut_entries); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT); } static void ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_hw *hw; 
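/*
 * The RSS hash enable (HENA) value is a 64-bit bitmask selecting which
 * packet types get hashed. It is programmed below with two 32-bit
 * register writes: the low word to I40E_VFQF_HENA1(0, vf_num) and the
 * high word to I40E_VFQF_HENA1(1, vf_num).
 */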
struct virtchnl_rss_hena *hena; hw = &pf->hw; if (msg_size < sizeof(*hena)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA, I40E_ERR_PARAM); return; } hena = msg; /* Set HENA */ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena); i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32)); DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx", vf->vf_num, hena->hena); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA); } static void ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf) { struct virtchnl_pf_event event; struct i40e_hw *hw; hw = &pf->hw; event.event = VIRTCHNL_EVENT_LINK_CHANGE; event.severity = PF_EVENT_SEVERITY_INFO; event.event_data.link_event.link_status = pf->vsi.link_active; event.event_data.link_event.link_speed = (enum virtchnl_link_speed)hw->phy.link_info.link_speed; ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event, sizeof(event)); } void ixl_broadcast_link_state(struct ixl_pf *pf) { int i; for (i = 0; i < pf->num_vfs; i++) ixl_notify_vf_link_state(pf, &pf->vfs[i]); } void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event) { struct ixl_vf *vf; void *msg; uint16_t vf_num, msg_size; uint32_t opcode; vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id; opcode = le32toh(event->desc.cookie_high); if (vf_num >= pf->num_vfs) { device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num); return; } vf = &pf->vfs[vf_num]; msg = event->msg_buf; msg_size = event->msg_len; I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode), "Got msg %s(%d) from%sVF-%d of size %d\n", ixl_vc_opcode_str(opcode), opcode, (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ", vf_num, msg_size); /* This must be a stray msg from a previously destroyed VF. */ if (!(vf->vf_flags & VF_FLAG_ENABLED)) return; switch (opcode) { case VIRTCHNL_OP_VERSION: ixl_vf_version_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_RESET_VF: ixl_vf_reset_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_GET_VF_RESOURCES: ixl_vf_get_resources_msg(pf, vf, msg, msg_size); /* Notify VF of link state after it obtains queues, as this is * the last thing it will do as part of initialization */ ixl_notify_vf_link_state(pf, vf); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: ixl_vf_config_vsi_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: ixl_vf_config_irq_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_ENABLE_QUEUES: ixl_vf_enable_queues_msg(pf, vf, msg, msg_size); /* Notify VF of link state after it obtains queues, as this is * the last thing it will do as part of initialization */ ixl_notify_vf_link_state(pf, vf); break; case VIRTCHNL_OP_DISABLE_QUEUES: ixl_vf_disable_queues_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_ADD_ETH_ADDR: ixl_vf_add_mac_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_DEL_ETH_ADDR: ixl_vf_del_mac_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_ADD_VLAN: ixl_vf_add_vlan_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_DEL_VLAN: ixl_vf_del_vlan_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ixl_vf_config_promisc_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_GET_STATS: ixl_vf_get_stats_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_SET_RSS_HENA: ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size); break; /* These two opcodes have been superseded 
by CONFIG_VSI_QUEUES. */ case VIRTCHNL_OP_CONFIG_TX_QUEUE: case VIRTCHNL_OP_CONFIG_RX_QUEUE: default: i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED); break; } } /* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */ void ixl_handle_vflr(void *arg, int pending) { struct ixl_pf *pf; struct ixl_vf *vf; struct i40e_hw *hw; uint16_t global_vf_num; uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0; int i; pf = arg; hw = &pf->hw; IXL_PF_LOCK(pf); for (i = 0; i < pf->num_vfs; i++) { global_vf_num = hw->func_caps.vf_base_id + i; vf = &pf->vfs[i]; if (!(vf->vf_flags & VF_FLAG_ENABLED)) continue; vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num); vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num); vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index)); if (vflrstat & vflrstat_mask) { wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index), vflrstat_mask); ixl_reinit_vf(pf, vf); } } icr0 = rd32(hw, I40E_PFINT_ICR0_ENA); icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, icr0); ixl_flush(hw); IXL_PF_UNLOCK(pf); } static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) { switch (err) { case I40E_AQ_RC_EPERM: return (EPERM); case I40E_AQ_RC_ENOENT: return (ENOENT); case I40E_AQ_RC_ESRCH: return (ESRCH); case I40E_AQ_RC_EINTR: return (EINTR); case I40E_AQ_RC_EIO: return (EIO); case I40E_AQ_RC_ENXIO: return (ENXIO); case I40E_AQ_RC_E2BIG: return (E2BIG); case I40E_AQ_RC_EAGAIN: return (EAGAIN); case I40E_AQ_RC_ENOMEM: return (ENOMEM); case I40E_AQ_RC_EACCES: return (EACCES); case I40E_AQ_RC_EFAULT: return (EFAULT); case I40E_AQ_RC_EBUSY: return (EBUSY); case I40E_AQ_RC_EEXIST: return (EEXIST); case I40E_AQ_RC_EINVAL: return (EINVAL); case I40E_AQ_RC_ENOTTY: return (ENOTTY); case I40E_AQ_RC_ENOSPC: return (ENOSPC); case I40E_AQ_RC_ENOSYS: return (ENOSYS); case I40E_AQ_RC_ERANGE: return (ERANGE); case I40E_AQ_RC_EFLUSHED: return (EINVAL); /* No exact equivalent in errno.h */ case I40E_AQ_RC_BAD_ADDR: return (EFAULT); case I40E_AQ_RC_EMODE: return (EPERM); case I40E_AQ_RC_EFBIG: return (EFBIG); default: return (EINVAL); } } int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *pf_vsi; enum i40e_status_code ret; - int i, error; + int error; pf = device_get_softc(dev); hw = &pf->hw; pf_vsi = &pf->vsi; IXL_PF_LOCK(pf); pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT | M_ZERO); if (pf->vfs == NULL) { error = ENOMEM; goto fail; } - for (i = 0; i < num_vfs; i++) - sysctl_ctx_init(&pf->vfs[i].ctx); - ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid, 1, FALSE, &pf->veb_seid, FALSE, NULL); if (ret != I40E_SUCCESS) { error = ixl_adminq_err_to_errno(hw->aq.asq_last_status); device_printf(dev, "add_veb failed; code=%d error=%d\n", ret, error); goto fail; } + /* + * Adding a VEB brings back the default MAC filter(s). Remove them, + * and let the driver add the proper filters back. 
+ */ + ixl_del_default_hw_filters(pf_vsi); + ixl_reconfigure_filters(pf_vsi); + pf->num_vfs = num_vfs; IXL_PF_UNLOCK(pf); return (0); fail: free(pf->vfs, M_IXL); pf->vfs = NULL; IXL_PF_UNLOCK(pf); return (error); } void ixl_iov_uninit(device_t dev) { struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; struct ifnet *ifp; struct ixl_vf *vfs; int i, num_vfs; pf = device_get_softc(dev); hw = &pf->hw; vsi = &pf->vsi; ifp = vsi->ifp; IXL_PF_LOCK(pf); for (i = 0; i < pf->num_vfs; i++) { if (pf->vfs[i].vsi.seid != 0) i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL); ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag); ixl_free_mac_filters(&pf->vfs[i].vsi); DDPRINTF(dev, "VF %d: %d released\n", i, pf->vfs[i].qtag.num_allocated); DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr)); } if (pf->veb_seid != 0) { i40e_aq_delete_element(hw, pf->veb_seid, NULL); pf->veb_seid = 0; } vfs = pf->vfs; num_vfs = pf->num_vfs; pf->vfs = NULL; pf->num_vfs = 0; IXL_PF_UNLOCK(pf); /* Do this after the unlock as sysctl_ctx_free might sleep. */ for (i = 0; i < num_vfs; i++) - sysctl_ctx_free(&vfs[i].ctx); + sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx); free(vfs, M_IXL); } static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues) { device_t dev = pf->dev; int error; /* Validate, and clamp value if invalid */ if (num_queues < 1 || num_queues > 16) device_printf(dev, "Invalid num-queues (%d) for VF %d\n", num_queues, vf->vf_num); if (num_queues < 1) { device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num); num_queues = 1; } else if (num_queues > 16) { device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num); num_queues = 16; } error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag); if (error) { device_printf(dev, "Error allocating %d queues for VF %d's VSI\n", num_queues, vf->vf_num); return (ENOSPC); } DDPRINTF(dev, "VF %d: %d allocated, %d active", vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active); DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr)); return (0); } int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { - char sysctl_name[QUEUE_NAME_LEN]; + char sysctl_name[IXL_QUEUE_NAME_LEN]; struct ixl_pf *pf; struct ixl_vf *vf; const void *mac; size_t size; int error; int vf_num_queues; pf = device_get_softc(dev); vf = &pf->vfs[vfnum]; IXL_PF_LOCK(pf); vf->vf_num = vfnum; - vf->vsi.back = pf; vf->vf_flags = VF_FLAG_ENABLED; - SLIST_INIT(&vf->vsi.ftl); /* Reserve queue allocation from PF */ vf_num_queues = nvlist_get_number(params, "num-queues"); error = ixl_vf_reserve_queues(pf, vf, vf_num_queues); if (error != 0) goto out; error = ixl_vf_setup_vsi(pf, vf); if (error != 0) goto out; if (nvlist_exists_binary(params, "mac-addr")) { mac = nvlist_get_binary(params, "mac-addr", &size); bcopy(mac, vf->mac, ETHER_ADDR_LEN); if (nvlist_get_bool(params, "allow-set-mac")) vf->vf_flags |= VF_FLAG_SET_MAC_CAP; } else /* * If the administrator has not specified a MAC address then * we must allow the VF to choose one. 
*/ vf->vf_flags |= VF_FLAG_SET_MAC_CAP; if (nvlist_get_bool(params, "mac-anti-spoof")) vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF; if (nvlist_get_bool(params, "allow-promisc")) vf->vf_flags |= VF_FLAG_PROMISC_CAP; vf->vf_flags |= VF_FLAG_VLAN_CAP; ixl_reset_vf(pf, vf); out: IXL_PF_UNLOCK(pf); if (error == 0) { snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); - ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name); + ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false); } return (error); } Index: releng/11.3/sys/dev/ixl/ixl_pf_iov.h =================================================================== --- releng/11.3/sys/dev/ixl/ixl_pf_iov.h (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_pf_iov.h (revision 349181) @@ -1,63 +1,63 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_PF_IOV_H_ #define _IXL_PF_IOV_H_ #include "ixl_pf.h" #include #include #include /* Public functions */ /* * These three are DEVMETHODs required for SR-IOV PF support. */ int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); void ixl_iov_uninit(device_t dev); int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); /* * The standard PF driver needs to call these during normal execution when * SR-IOV mode is active. 
*/ void ixl_initialize_sriov(struct ixl_pf *pf); void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event); void ixl_handle_vflr(void *arg, int pending); void ixl_broadcast_link_state(struct ixl_pf *pf); #endif /* _IXL_PF_IOV_H_ */ Index: releng/11.3/sys/dev/ixl/ixl_pf_main.c =================================================================== --- releng/11.3/sys/dev/ixl/ixl_pf_main.c (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_pf_main.c (revision 349181) @@ -1,6597 +1,6805 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf.h" #ifdef PCI_IOV #include "ixl_pf_iov.h" #endif #ifdef IXL_IW #include "ixl_iw.h" #include "ixl_iw_int.h" #endif #ifdef DEV_NETMAP -#include -#include -#include +#include #endif /* DEV_NETMAP */ static int ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int); static u64 ixl_max_aq_speed_to_value(u8); static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool); +static enum i40e_status_code ixl_set_lla(struct ixl_vsi *); +static const char * ixl_link_speed_string(u8 link_speed); + /* Sysctls */ static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS); /* Debug Sysctls */ static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS); -#ifdef IXL_DEBUG -static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); -static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); -#endif +static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); + #ifdef IXL_IW extern int ixl_enable_iwarp; extern int ixl_limit_iwarp_msix; #endif const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; const char * const ixl_fc_string[6] = { "None", "Rx", "Tx", "Full", "Priority", "Default" }; static char *ixl_fec_string[3] = { "CL108 RS-FEC", "CL74 FC-FEC/BASE-R", "None" }; MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); void ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...) 
{ va_list args; if (!(mask & pf->dbg_mask)) return; /* Re-implement device_printf() */ device_print_prettyname(pf->dev); va_start(args, fmt); vprintf(fmt, args); va_end(args); } /* ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string */ void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf) { u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24); u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF); u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF); sbuf_printf(buf, "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> IXL_NVM_VERSION_HI_SHIFT, (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> IXL_NVM_VERSION_LO_SHIFT, hw->nvm.eetrack, oem_ver, oem_build, oem_patch); } void ixl_print_nvm_version(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *sbuf; sbuf = sbuf_new_auto(); ixl_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); device_printf(dev, "%s\n", sbuf_data(sbuf)); sbuf_delete(sbuf); } +bool +ixl_fw_recovery_mode(struct ixl_pf *pf) +{ + return (rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK); +} + static void ixl_configure_tx_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; vsi->tx_itr_setting = pf->tx_itr; for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; txr->latency = IXL_AVE_LATENCY; } } static void ixl_configure_rx_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; vsi->rx_itr_setting = pf->rx_itr; for (int i = 0; i < vsi->num_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; rxr->latency = IXL_AVE_LATENCY; } } /* * Write PF ITR values to queue ITR registers. */ void ixl_configure_itr(struct ixl_pf *pf) { ixl_configure_tx_itr(pf); ixl_configure_rx_itr(pf); } - /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ void ixl_init_locked(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; struct i40e_filter_control_settings filter; - u8 tmpaddr[ETHER_ADDR_LEN]; - int ret; INIT_DEBUGOUT("ixl_init_locked: begin"); IXL_PF_LOCK_ASSERT(pf); + if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) { + device_printf(dev, "Running in recovery mode, only firmware update available\n"); + return; + } + ixl_stop_locked(pf); /* * If the aq is dead here, it probably means something outside of the driver * did something to the adapter, like a PF reset. * So rebuild the driver's state here if that occurs. */ if (!i40e_check_asq_alive(&pf->hw)) { device_printf(dev, "Admin Queue is down; resetting...\n"); ixl_teardown_hw_structs(pf); ixl_reset(pf); } /* Get the latest mac address... 
User might use a LAA */ - bcopy(IF_LLADDR(vsi->ifp), tmpaddr, - ETH_ALEN); - if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && - (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { - device_printf(dev, "ixl_init_locked: reconfigure MAC addr\n"); - ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); - bcopy(tmpaddr, hw->mac.addr, - ETH_ALEN); - ret = i40e_aq_mac_address_write(hw, - I40E_AQC_WRITE_TYPE_LAA_ONLY, - hw->mac.addr, NULL); - if (ret) { - device_printf(dev, "LLA address" - "change failed!!\n"); - return; - } + if (ixl_set_lla(vsi)) { + device_printf(dev, "LLA address change failed!\n"); + return; } - ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); - /* Set the various hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); /* Set up the device filtering */ bzero(&filter, sizeof(filter)); filter.enable_ethtype = TRUE; filter.enable_macvlan = TRUE; filter.enable_fdir = FALSE; filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; if (i40e_set_filter_control(hw, &filter)) device_printf(dev, "i40e_set_filter_control() failed\n"); /* Prepare the VSI: rings, hmc contexts, etc... */ if (ixl_initialize_vsi(vsi)) { device_printf(dev, "initialize vsi failed!!\n"); return; } /* Set up RSS */ ixl_config_rss(pf); - /* Add protocol filters to list */ - ixl_init_filters(vsi); - - /* Setup vlan's if needed */ - ixl_setup_vlan_filters(vsi); - /* Set up MSI/X routing and the ITR settings */ if (pf->msix > 1) { ixl_configure_queue_intr_msix(pf); ixl_configure_itr(pf); } else ixl_configure_legacy(pf); ixl_enable_rings(vsi); i40e_aq_set_default_vsi(hw, vsi->seid, NULL); ixl_reconfigure_filters(vsi); + /* Check if PROMISC or ALLMULTI flags have been set + * by user before bringing interface up */ + ixl_set_promisc(vsi); + /* And now turn on interrupts */ ixl_enable_intr(vsi); /* Get link info */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); ixl_update_link_status(pf); - /* Start the local timer */ - callout_reset(&pf->timer, hz, ixl_local_timer, pf); - /* Now inform the stack we're ready */ ifp->if_drv_flags |= IFF_DRV_RUNNING; #ifdef IXL_IW if (ixl_enable_iwarp && pf->iw_enabled) { - ret = ixl_iw_pf_init(pf); + int ret = ixl_iw_pf_init(pf); if (ret) device_printf(dev, "initialize iwarp failed, code %d\n", ret); } #endif } /********************************************************************* * * Get the hardware capabilities * **********************************************************************/ int ixl_get_hw_capabilities(struct ixl_pf *pf) { struct i40e_aqc_list_capabilities_element_resp *buf; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; - int error, len; + int error, len, i2c_intfc_num; u16 needed; bool again = TRUE; + if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) { + hw->func_caps.iwarp = 0; + return 0; + } + len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); retry: if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate cap memory\n"); return (ENOMEM); } /* This populates the hw struct */ error = i40e_aq_discover_capabilities(hw, buf, len, &needed, i40e_aqc_opc_list_func_capabilities, NULL); free(buf, M_DEVBUF); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && (again == TRUE)) { /* retry once with a larger buffer */ again = 
FALSE; len = needed; goto retry; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { device_printf(dev, "capability discovery failed: %d\n", pf->hw.aq.asq_last_status); return (ENODEV); } - /* Capture this PF's starting queue pair */ - pf->qbase = hw->func_caps.base_queue; - #ifdef IXL_DEBUG device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, " "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n", hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, hw->func_caps.num_msix_vectors_vf, hw->func_caps.fd_filters_guaranteed, hw->func_caps.fd_filters_best_effort, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, hw->func_caps.base_queue); #endif - struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back; - osdep->i2c_intfc_num = ixl_find_i2c_interface(pf); - if (osdep->i2c_intfc_num != -1) + /* + * Some devices have both MDIO and I2C; since this isn't reported + * by the FW, check registers to see if an I2C interface exists. + */ + i2c_intfc_num = ixl_find_i2c_interface(pf); + if (i2c_intfc_num != -1) pf->has_i2c = true; + /* Determine functions to use for driver I2C accesses */ + switch (pf->i2c_access_method) { + case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: { + if (hw->mac.type == I40E_MAC_XL710 && + hw->aq.api_maj_ver == 1 && + hw->aq.api_min_ver >= 7) { + pf->read_i2c_byte = ixl_read_i2c_byte_aq; + pf->write_i2c_byte = ixl_write_i2c_byte_aq; + } else { + pf->read_i2c_byte = ixl_read_i2c_byte_reg; + pf->write_i2c_byte = ixl_write_i2c_byte_reg; + } + break; + } + case IXL_I2C_ACCESS_METHOD_AQ: + pf->read_i2c_byte = ixl_read_i2c_byte_aq; + pf->write_i2c_byte = ixl_write_i2c_byte_aq; + break; + case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD: + pf->read_i2c_byte = ixl_read_i2c_byte_reg; + pf->write_i2c_byte = ixl_write_i2c_byte_reg; + break; + case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS: + pf->read_i2c_byte = ixl_read_i2c_byte_bb; + pf->write_i2c_byte = ixl_write_i2c_byte_bb; + break; + default: + /* Should not happen */ + device_printf(dev, "Error setting I2C access functions\n"); + break; + } + /* Print a subset of the capability information. */ device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n", hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, (hw->func_caps.mdio_port_mode == 2) ? "I2C" : (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" : (hw->func_caps.mdio_port_mode == 1) ? 
"MDIO dedicated" : "MDIO shared"); return (error); } void ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) { device_t dev = vsi->dev; /* Enable/disable TXCSUM/TSO4 */ if (!(ifp->if_capenable & IFCAP_TXCSUM) && !(ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { ifp->if_capenable |= IFCAP_TXCSUM; /* enable TXCSUM, restore TSO if previously enabled */ if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; ifp->if_capenable |= IFCAP_TSO4; } } else if (mask & IFCAP_TSO4) { ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; device_printf(dev, "TSO4 requires txcsum, enabling both...\n"); } } else if((ifp->if_capenable & IFCAP_TXCSUM) && !(ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) ifp->if_capenable &= ~IFCAP_TXCSUM; else if (mask & IFCAP_TSO4) ifp->if_capenable |= IFCAP_TSO4; } else if((ifp->if_capenable & IFCAP_TXCSUM) && (ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { vsi->flags |= IXL_FLAGS_KEEP_TSO4; ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); device_printf(dev, "TSO4 requires txcsum, disabling both...\n"); } else if (mask & IFCAP_TSO4) ifp->if_capenable &= ~IFCAP_TSO4; } /* Enable/disable TXCSUM_IPV6/TSO6 */ if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; ifp->if_capenable |= IFCAP_TSO6; } } else if (mask & IFCAP_TSO6) { ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; device_printf(dev, "TSO6 requires txcsum6, enabling both...\n"); } } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; else if (mask & IFCAP_TSO6) ifp->if_capenable |= IFCAP_TSO6; } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && (ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { vsi->flags |= IXL_FLAGS_KEEP_TSO6; ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); device_printf(dev, "TSO6 requires txcsum6, disabling both...\n"); } else if (mask & IFCAP_TSO6) ifp->if_capenable &= ~IFCAP_TSO6; } } /* For the set_advertise sysctl */ void ixl_set_initial_advertised_speeds(struct ixl_pf *pf) { device_t dev = pf->dev; int err; /* Make sure to initialize the device to the complete list of * supported speeds on driver load, to ensure unloading and * reloading the driver will restore this value. 
*/ err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); if (err) { /* Non-fatal error */ device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", __func__, err); return; } pf->advertised_speed = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); } int ixl_teardown_hw_structs(struct ixl_pf *pf) { enum i40e_status_code status = 0; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; /* Shutdown LAN HMC */ if (hw->hmc.hmc_obj) { status = i40e_shutdown_lan_hmc(hw); if (status) { device_printf(dev, "init: LAN HMC shutdown failure; status %d\n", status); goto err_out; } } /* Shutdown admin queue */ ixl_disable_intr0(hw); status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, "init: Admin Queue shutdown failure; status %d\n", status); err_out: return (status); } int ixl_reset(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u8 set_fc_err_mask; int error = 0; // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary i40e_clear_hw(hw); error = i40e_pf_reset(hw); if (error) { device_printf(dev, "init: PF reset failure\n"); error = EIO; goto err_out; } error = i40e_init_adminq(hw); if (error) { device_printf(dev, "init: Admin queue init failure;" " status code %d\n", error); error = EIO; goto err_out; } i40e_clear_pxe_mode(hw); error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "init: Error retrieving HW capabilities;" " status code %d\n", error); goto err_out; } error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (error) { device_printf(dev, "init: LAN HMC init failed; status code %d\n", error); error = EIO; goto err_out; } error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (error) { device_printf(dev, "init: LAN HMC config failed; status code %d\n", error); error = EIO; goto err_out; } // XXX: possible fix for panic, but our failure recovery is still broken error = ixl_switch_config(pf); if (error) { device_printf(dev, "init: ixl_switch_config() failed: %d\n", error); goto err_out; } error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (error) { device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d," " aq_err %d\n", error, hw->aq.asq_last_status); error = EIO; goto err_out; } error = i40e_set_fc(hw, &set_fc_err_mask, true); if (error) { device_printf(dev, "init: setting link flow control failed; retcode %d," " fc_err_mask 0x%02x\n", error, set_fc_err_mask); goto err_out; } // XXX: (Rebuild VSIs?) 
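/*
 * On firmware older than 4.33 the driver must delay and explicitly
 * restart auto-negotiation after a PF reset; the version check below
 * gates that workaround, waiting 75 ms before asking the firmware to
 * restart the link.
 */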
/* Firmware delay workaround */ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) { i40e_msec_delay(75); error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); if (error) { device_printf(dev, "init: link restart failed, aq_err %d\n", hw->aq.asq_last_status); goto err_out; } } /* Re-enable admin queue interrupt */ if (pf->msix > 1) { ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); } err_out: return (error); } /* ** MSIX Interrupt Handlers and Tasklets */ void ixl_handle_que(void *context, int pending) { struct ixl_queue *que = context; struct ixl_vsi *vsi = que->vsi; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ifnet *ifp = vsi->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { more = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); ixl_txeof(que); if (!drbr_empty(ifp, txr->br)) ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); if (more) { taskqueue_enqueue(que->tq, &que->task); return; } } /* Re-enable queue interrupt */ if (pf->msix > 1) ixl_enable_queue(hw, que->me); else ixl_enable_intr0(hw); } /********************************************************************* * * Legacy Interrupt Service routine * **********************************************************************/ void ixl_intr(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; struct ifnet *ifp = vsi->ifp; struct tx_ring *txr = &que->txr; u32 icr0; bool more; + ixl_disable_intr0(hw); + pf->admin_irq++; /* Clear PBA at start of ISR if using legacy interrupts */ if (pf->msix == 0) wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); icr0 = rd32(hw, I40E_PFINT_ICR0); #ifdef PCI_IOV if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) taskqueue_enqueue(pf->tq, &pf->vflr_task); #endif if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) taskqueue_enqueue(pf->tq, &pf->adminq); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ++que->irqs; more = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); ixl_txeof(que); if (!drbr_empty(vsi->ifp, txr->br)) ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); if (more) taskqueue_enqueue(que->tq, &que->task); } ixl_enable_intr0(hw); } /********************************************************************* * * MSIX VSI Interrupt Service routine * **********************************************************************/ void ixl_msix_que(void *arg) { struct ixl_queue *que = arg; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; bool more_tx, more_rx; /* Protect against spurious interrupts */ if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; + /* There are drivers which disable auto-masking of interrupts, + * which is a global setting for all ports. We have to make sure + * to mask it to not lose IRQs */ + ixl_disable_queue(hw, que->me); + ++que->irqs; more_rx = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); more_tx = ixl_txeof(que); /* ** Make certain that if the stack ** has anything queued the task gets ** scheduled to handle it. 
*/ if (!drbr_empty(vsi->ifp, txr->br)) more_tx = 1; IXL_TX_UNLOCK(txr); ixl_set_queue_rx_itr(que); ixl_set_queue_tx_itr(que); if (more_tx || more_rx) taskqueue_enqueue(que->tq, &que->task); else ixl_enable_queue(hw, que->me); return; } /********************************************************************* * * MSIX Admin Queue Interrupt Service routine * **********************************************************************/ void ixl_msix_adminq(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u32 reg, mask, rstat_reg; bool do_task = FALSE; ++pf->admin_irq; reg = rd32(hw, I40E_PFINT_ICR0); mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* Check on the cause */ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) { mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK; do_task = TRUE; } if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) { ixl_handle_mdd_event(pf); mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK; } if (reg & I40E_PFINT_ICR0_GRST_MASK) { device_printf(dev, "Reset Requested!\n"); rstat_reg = rd32(hw, I40E_GLGEN_RSTAT); rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; device_printf(dev, "Reset type: "); switch (rstat_reg) { /* These others might be handled similarly to an EMPR reset */ case I40E_RESET_CORER: printf("CORER\n"); break; case I40E_RESET_GLOBR: printf("GLOBR\n"); break; case I40E_RESET_EMPR: printf("EMPR\n"); atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); break; default: printf("POR\n"); break; } /* overload admin queue task to check reset progress */ do_task = TRUE; } if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) { device_printf(dev, "ECC Error detected!\n"); } if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) { reg = rd32(hw, I40E_PFHMC_ERRORINFO); if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) { device_printf(dev, "HMC Error detected!\n"); device_printf(dev, "INFO 0x%08x\n", reg); reg = rd32(hw, I40E_PFHMC_ERRORDATA); device_printf(dev, "DATA 0x%08x\n", reg); wr32(hw, I40E_PFHMC_ERRORINFO, 0); } } if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) { device_printf(dev, "PCI Exception detected!\n"); } #ifdef PCI_IOV if (reg & I40E_PFINT_ICR0_VFLR_MASK) { mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; taskqueue_enqueue(pf->tq, &pf->vflr_task); } #endif if (do_task) taskqueue_enqueue(pf->tq, &pf->adminq); else ixl_enable_intr0(hw); } void ixl_set_promisc(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int err, mcnt = 0; bool uni = FALSE, multi = FALSE; if (ifp->if_flags & IFF_PROMISC) uni = multi = TRUE; else if (ifp->if_flags & IFF_ALLMULTI) multi = TRUE; else { /* Need to count the multicast addresses */ struct ifmultiaddr *ifma; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_MULTICAST_ADDR) { multi = TRUE; break; } mcnt++; } if_maddr_runlock(ifp); } err = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, uni, NULL, TRUE); err = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, multi, NULL); return; } /********************************************************************* * Filter Routines * * Routines for multicast and vlan filter management. 
* *********************************************************************/ void ixl_add_multi(struct ixl_vsi *vsi) { - struct ifmultiaddr *ifma; + struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int mcnt = 0, flags; IOCTL_DEBUGOUT("ixl_add_multi: begin"); if_maddr_rlock(ifp); /* ** First just get a count, to decide if ** we simply use multicast promiscuous. */ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcnt++; } if_maddr_runlock(ifp); if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete existing MC filters */ ixl_del_hw_filters(vsi, mcnt); i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); return; } mcnt = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; ixl_add_mc_filter(vsi, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); mcnt++; } if_maddr_runlock(ifp); if (mcnt > 0) { flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); ixl_add_hw_filters(vsi, flags, mcnt); } IOCTL_DEBUGOUT("ixl_add_multi: end"); return; } void ixl_del_multi(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct ifmultiaddr *ifma; struct ixl_mac_filter *f; int mcnt = 0; bool match = FALSE; IOCTL_DEBUGOUT("ixl_del_multi: begin"); /* Search for removed multicast addresses */ if_maddr_rlock(ifp); SLIST_FOREACH(f, &vsi->ftl, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { match = FALSE; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); if (cmp_etheraddr(f->macaddr, mc_addr)) { match = TRUE; break; } } if (match == FALSE) { f->flags |= IXL_FILTER_DEL; mcnt++; } } } if_maddr_runlock(ifp); if (mcnt > 0) ixl_del_hw_filters(vsi, mcnt); } /********************************************************************* * Timer routine * * This routine checks for link status, updates statistics, * and runs the watchdog check. * - * Only runs when the driver is configured UP and RUNNING. - * **********************************************************************/ void ixl_local_timer(void *arg) { struct ixl_pf *pf = arg; + struct ifnet *ifp = pf->vsi.ifp; + if (ixl_fw_recovery_mode(pf)) { + if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixl_stop_locked(pf); + atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE | IXL_PF_STATE_EMPR_RESETTING); + device_printf(pf->dev, "Firmware recovery mode detected. Limiting functionality. 
Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); + } + } + IXL_PF_LOCK_ASSERT(pf); /* Fire off the adminq task */ taskqueue_enqueue(pf->tq, &pf->adminq); - /* Update stats */ - ixl_update_stats_counters(pf); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + /* Update stats */ + ixl_update_stats_counters(pf); + } - /* Increment stat when a queue shows hung */ - if (ixl_queue_hang_check(&pf->vsi)) + if (ixl_queue_hang_check(&pf->vsi)) { + /* Increment stat when a queue shows hung */ pf->watchdog_events++; + } callout_reset(&pf->timer, hz, ixl_local_timer, pf); } void ixl_link_up_msg(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ifnet *ifp = pf->vsi.ifp; char *req_fec_string, *neg_fec_string; u8 fec_abilities; fec_abilities = hw->phy.link_info.req_fec_info; /* If both RS and KR are requested, only show RS */ if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) req_fec_string = ixl_fec_string[0]; else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) req_fec_string = ixl_fec_string[1]; else req_fec_string = ixl_fec_string[2]; if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) neg_fec_string = ixl_fec_string[0]; else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) neg_fec_string = ixl_fec_string[1]; else neg_fec_string = ixl_fec_string[2]; log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", ifp->if_xname, - ixl_aq_speed_to_str(hw->phy.link_info.link_speed), + ixl_link_speed_string(hw->phy.link_info.link_speed), req_fec_string, neg_fec_string, (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? ixl_fc_string[1] : ixl_fc_string[0]); } /* ** Note: this routine updates the OS on the link state ** the real check of the hardware only happens with ** a link interrupt. */ void ixl_update_link_status(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; if (pf->link_up) { if (vsi->link_active == FALSE) { vsi->link_active = TRUE; #if __FreeBSD_version >= 1100000 ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed); #else if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed)); #endif if_link_state_change(ifp, LINK_STATE_UP); ixl_link_up_msg(pf); #ifdef PCI_IOV ixl_broadcast_link_state(pf); #endif } } else { /* Link down */ if (vsi->link_active == TRUE) { if (bootverbose) device_printf(dev, "Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); vsi->link_active = FALSE; #ifdef PCI_IOV ixl_broadcast_link_state(pf); #endif } } } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. 
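*
* A sketch of the teardown order used below:
*
*   ifp->if_drv_flags &= ~IFF_DRV_RUNNING;  // stack sees us as down first
*   ixl_disable_rings_intr(vsi);            // then silence queue interrupts
*   ixl_disable_rings(vsi);                 // then stop the rings themselves
*
* Clearing IFF_DRV_RUNNING first means concurrent transmit and ioctl
* paths already observe the interface as inactive by the time the
* rings are disabled.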
* **********************************************************************/ void ixl_stop_locked(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; INIT_DEBUGOUT("ixl_stop: begin\n"); IXL_PF_LOCK_ASSERT(pf); + /* Tell the stack that the interface is no longer active */ + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); + #ifdef IXL_IW /* Stop iWARP device */ if (ixl_enable_iwarp && pf->iw_enabled) ixl_iw_pf_stop(pf); #endif - /* Stop the local timer */ - callout_stop(&pf->timer); - ixl_disable_rings_intr(vsi); ixl_disable_rings(vsi); - - /* Tell the stack that the interface is no longer active */ - ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); } void ixl_stop(struct ixl_pf *pf) { IXL_PF_LOCK(pf); ixl_stop_locked(pf); IXL_PF_UNLOCK(pf); } /********************************************************************* * * Setup MSIX Interrupt resources and handlers for the VSI * **********************************************************************/ int ixl_setup_legacy(struct ixl_pf *pf) { device_t dev = pf->dev; int error, rid = 0; if (pf->msix == 1) rid = 1; pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (pf->res == NULL) { device_printf(dev, "bus_alloc_resource_any() for" " legacy/msi interrupt\n"); return (ENXIO); } /* Set the handler function */ error = bus_setup_intr(dev, pf->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_intr, pf, &pf->tag); if (error) { pf->res = NULL; device_printf(dev, "bus_setup_intr() for legacy/msi" " interrupt handler failed, error %d\n", error); return (ENXIO); } error = bus_describe_intr(dev, pf->res, pf->tag, "irq"); if (error) { /* non-fatal */ device_printf(dev, "bus_describe_intr() for Admin Queue" " interrupt name failed, error %d\n", error); } return (0); } int ixl_setup_adminq_tq(struct ixl_pf *pf) { device_t dev = pf->dev; int error = 0; /* Tasklet for Admin Queue interrupts */ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); #ifdef PCI_IOV /* VFLR Tasklet */ TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); #endif /* Create and start Admin Queue taskqueue */ pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT, taskqueue_thread_enqueue, &pf->tq); if (!pf->tq) { device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n"); return (ENOMEM); } error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq", device_get_nameunit(dev)); if (error) { device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n", error); taskqueue_free(pf->tq); return (error); } return (0); } int ixl_setup_queue_tqs(struct ixl_vsi *vsi) { struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; #ifdef RSS int cpu_id = 0; cpuset_t cpu_mask; #endif /* Create queue tasks and start queue taskqueues */ for (int i = 0; i < vsi->num_queues; i++, que++) { TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); TASK_INIT(&que->task, 0, ixl_handle_que, que); que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); #ifdef RSS CPU_SETOF(cpu_id, &cpu_mask); taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, &cpu_mask, "%s (bucket %d)", device_get_nameunit(dev), cpu_id); #else taskqueue_start_threads(&que->tq, 1, PI_NET, "%s (que %d)", device_get_nameunit(dev), que->me); #endif } return (0); } void ixl_free_adminq_tq(struct ixl_pf *pf) { if (pf->tq) { taskqueue_free(pf->tq); pf->tq = NULL; } } void ixl_free_queue_tqs(struct ixl_vsi *vsi) { struct ixl_queue *que = vsi->queues; for (int i = 0; i < vsi->num_queues; i++, que++) { if (que->tq) { taskqueue_free(que->tq); que->tq = NULL; } } } int 
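/*
 * MSI-X layout assumed by the setup and teardown routines that follow;
 * a given vector N is exposed on the bus as rid N + 1:
 *
 *   vector 0             rid 1      admin queue and "other" causes
 *   vector 1..nqueues    rid 2..    one RX/TX queue pair per vector
 */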
ixl_setup_adminq_msix(struct ixl_pf *pf) { device_t dev = pf->dev; int rid, error = 0; /* Admin IRQ rid is 1, vector is 0 */ rid = 1; /* Get interrupt resource from bus */ pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!pf->res) { device_printf(dev, "bus_alloc_resource_any() for Admin Queue" " interrupt failed [rid=%d]\n", rid); return (ENXIO); } /* Then associate interrupt with handler */ error = bus_setup_intr(dev, pf->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_msix_adminq, pf, &pf->tag); if (error) { pf->res = NULL; device_printf(dev, "bus_setup_intr() for Admin Queue" " interrupt handler failed, error %d\n", error); return (ENXIO); } error = bus_describe_intr(dev, pf->res, pf->tag, "aq"); if (error) { /* non-fatal */ device_printf(dev, "bus_describe_intr() for Admin Queue" " interrupt name failed, error %d\n", error); } pf->admvec = 0; return (0); } /* * Allocate interrupt resources from bus and associate an interrupt handler * to those for the VSI's queues. */ int ixl_setup_queue_msix(struct ixl_vsi *vsi) { device_t dev = vsi->dev; struct ixl_queue *que = vsi->queues; struct tx_ring *txr; int error, rid, vector = 1; /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { int cpu_id = i; rid = vector + 1; txr = &que->txr; que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!que->res) { device_printf(dev, "bus_alloc_resource_any() for" " Queue %d interrupt failed [rid=%d]\n", que->me, rid); return (ENXIO); } /* Set the handler function */ error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_msix_que, que, &que->tag); if (error) { device_printf(dev, "bus_setup_intr() for Queue %d" " interrupt handler failed, error %d\n", que->me, error); bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); return (error); } error = bus_describe_intr(dev, que->res, que->tag, "q%d", i); if (error) { device_printf(dev, "bus_describe_intr() for Queue %d" " interrupt name failed, error %d\n", que->me, error); } /* Bind the vector to a CPU */ #ifdef RSS cpu_id = rss_getcpu(i % rss_getnumbuckets()); #endif error = bus_bind_intr(dev, que->res, cpu_id); if (error) { device_printf(dev, "bus_bind_intr() for Queue %d" " to CPU %d failed, error %d\n", que->me, cpu_id, error); } que->msix = vector; } return (0); } /* * Allocate MSI/X vectors from the OS. * Returns 0 for legacy, 1 for MSI, >1 for MSIX. 
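*
* For example, on an 8-CPU system with 16 vectors available and no
* tunables set, the logic below picks min(min(8, 16 - 1), 8) = 8 queues
* and requests want = 8 + 1 = 9 vectors: one per queue pair plus the
* admin queue vector.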
*/ int ixl_init_msix(struct ixl_pf *pf) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; #ifdef IXL_IW #if __FreeBSD_version >= 1100000 cpuset_t cpu_set; #endif #endif int auto_max_queues; int rid, want, vectors, queues, available; #ifdef IXL_IW int iw_want=0, iw_vectors; pf->iw_msix = 0; #endif /* Override by tuneable */ if (!pf->enable_msix) goto no_msix; /* First try MSI/X */ rid = PCIR_BAR(IXL_MSIX_BAR); pf->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!pf->msix_mem) { /* May not be enabled */ device_printf(pf->dev, "Unable to map MSIX table\n"); goto no_msix; } available = pci_msix_count(dev); if (available < 2) { /* system has msix disabled (0), or only one vector (1) */ device_printf(pf->dev, "Less than two MSI-X vectors available\n"); bus_release_resource(dev, SYS_RES_MEMORY, rid, pf->msix_mem); pf->msix_mem = NULL; goto no_msix; } /* Clamp max number of queues based on: * - # of MSI-X vectors available * - # of cpus available * - # of queues that can be assigned to the LAN VSI */ auto_max_queues = min(mp_ncpus, available - 1); if (hw->mac.type == I40E_MAC_X722) auto_max_queues = min(auto_max_queues, 128); else auto_max_queues = min(auto_max_queues, 64); /* Override with tunable value if tunable is less than autoconfig count */ if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues)) queues = pf->max_queues; /* Use autoconfig amount if that's lower */ else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) { device_printf(dev, "ixl_max_queues (%d) is too large, using " "autoconfig amount (%d)...\n", pf->max_queues, auto_max_queues); queues = auto_max_queues; } /* Limit maximum auto-configured queues to 8 if no user value is set */ else queues = min(auto_max_queues, 8); #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif /* ** Want one vector (RX/TX pair) per queue ** plus an additional for the admin queue. */ want = queues + 1; if (want <= available) /* Have enough */ vectors = want; else { device_printf(pf->dev, "MSIX Configuration Problem, " "%d vectors available but %d wanted!\n", available, want); pf->msix_mem = NULL; goto no_msix; /* Will go to Legacy setup */ } #ifdef IXL_IW if (ixl_enable_iwarp && hw->func_caps.iwarp) { #if __FreeBSD_version >= 1100000 if(bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0) { iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX); } #endif if(!iw_want) iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX); if(ixl_limit_iwarp_msix > 0) iw_want = min(iw_want, ixl_limit_iwarp_msix); else iw_want = min(iw_want, 1); available -= vectors; if (available > 0) { iw_vectors = (available >= iw_want) ? iw_want : available; vectors += iw_vectors; } else iw_vectors = 0; } #endif ixl_set_msix_enable(dev); if (pci_alloc_msix(dev, &vectors) == 0) { device_printf(pf->dev, "Using MSIX interrupts with %d vectors\n", vectors); pf->msix = vectors; #ifdef IXL_IW if (ixl_enable_iwarp && hw->func_caps.iwarp) { pf->iw_msix = iw_vectors; device_printf(pf->dev, "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n", iw_vectors); } #endif pf->vsi.num_queues = queues; #ifdef RSS /* * If we're doing RSS, the number of queues needs to * match the number of RSS buckets that are configured. * * + If there's more queues than RSS buckets, we'll end * up with queues that get no traffic. * * + If there's more RSS buckets than queues, we'll end * up having multiple RSS buckets map to the same queue, * so there'll be some contention. 
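*
* For example, with 8 RSS buckets and 4 queues, the lookup table built
* in ixl_set_rss_hlut() maps buckets onto queues modulo num_queues
* (0,1,2,3,0,1,2,3), so every queue ends up serving two buckets.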
*/ if (queues != rss_getnumbuckets()) { device_printf(dev, "%s: queues (%d) != RSS buckets (%d)" "; performance will be impacted.\n", __func__, queues, rss_getnumbuckets()); } #endif return (vectors); } no_msix: vectors = pci_msi_count(dev); pf->vsi.num_queues = 1; pf->max_queues = 1; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) device_printf(pf->dev, "Using an MSI interrupt\n"); else { vectors = 0; device_printf(pf->dev, "Using a Legacy interrupt\n"); } return (vectors); } /* * Configure admin queue/misc interrupt cause registers in hardware. */ void ixl_configure_intr0_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg; /* First set up the adminq - vector 0 */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ rd32(hw, I40E_PFINT_ICR0); /* read to clear */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* * 0x7FF is the end of the queue list. * This means we won't use MSI-X vector 0 for a queue interrupt * in MSIX mode. */ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); wr32(hw, I40E_PFINT_STAT_CTL0, 0); } /* * Configure queue interrupt cause registers in hardware. */ void ixl_configure_queue_intr_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; u32 reg; u16 vector = 1; for (int i = 0; i < vsi->num_queues; i++, vector++) { wr32(hw, I40E_PFINT_DYN_CTLN(i), 0); /* First queue type is RX / 0 */ wr32(hw, I40E_PFINT_LNKLSTN(i), i); reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(i), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_TQCTL(i), reg); } } /* * Configure for MSI single vector operation */ void ixl_configure_legacy(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; struct rx_ring *rxr = &que->rxr; struct tx_ring *txr = &que->txr; u32 reg; /* Configure ITR */ vsi->tx_itr_setting = pf->tx_itr; wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; vsi->rx_itr_setting = pf->rx_itr; wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; /* Setup "other" causes */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK ; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* No ITR for non-queue interrupts */ wr32(hw, I40E_PFINT_STAT_CTL0, IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT); /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); /* 
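Note that the MSI-X path above writes 0x7FF (end-of-list) into
LNKLST0 so that vector 0 carries no queue, while here the list is
pointed at queue 0 so the single legacy/MSI vector services the
queue pair as well.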
Associate the queue pair to the vector and enable the q int */ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(0), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(0), reg); } int ixl_allocate_pci_resources(struct ixl_pf *pf) { int rid; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; /* Map BAR0 */ rid = PCIR_BAR(0); pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(pf->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); return (ENXIO); } /* Ensure proper PCI device operation */ ixl_set_busmaster(dev); /* Save off the PCI information */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); /* Save off register access information */ pf->osdep.mem_bus_space_tag = rman_get_bustag(pf->pci_mem); pf->osdep.mem_bus_space_handle = rman_get_bushandle(pf->pci_mem); pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); pf->osdep.flush_reg = I40E_GLGEN_STAT; pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; pf->hw.back = &pf->osdep; return (0); } /* * Teardown and release the admin queue/misc vector * interrupt. */ int ixl_teardown_adminq_msix(struct ixl_pf *pf) { device_t dev = pf->dev; int rid, error = 0; if (pf->admvec) /* we are doing MSIX */ rid = pf->admvec + 1; else (pf->msix != 0) ? 
(rid = 1):(rid = 0); if (pf->tag != NULL) { bus_teardown_intr(dev, pf->res, pf->tag); if (error) { device_printf(dev, "bus_teardown_intr() for" " interrupt 0 failed\n"); // return (ENXIO); } pf->tag = NULL; } if (pf->res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res); if (error) { device_printf(dev, "bus_release_resource() for" " interrupt 0 failed [rid=%d]\n", rid); // return (ENXIO); } pf->res = NULL; } return (0); } int ixl_teardown_queue_msix(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; int rid, error = 0; /* We may get here before stations are setup */ if ((pf->msix < 2) || (que == NULL)) return (0); /* Release all MSIX queue resources */ for (int i = 0; i < vsi->num_queues; i++, que++) { rid = que->msix + 1; if (que->tag != NULL) { error = bus_teardown_intr(dev, que->res, que->tag); if (error) { device_printf(dev, "bus_teardown_intr() for" " Queue %d interrupt failed\n", que->me); // return (ENXIO); } que->tag = NULL; } if (que->res != NULL) { error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); if (error) { device_printf(dev, "bus_release_resource() for" " Queue %d interrupt failed [rid=%d]\n", que->me, rid); // return (ENXIO); } que->res = NULL; } } return (0); } void ixl_free_pci_resources(struct ixl_pf *pf) { device_t dev = pf->dev; int memrid; ixl_teardown_queue_msix(&pf->vsi); ixl_teardown_adminq_msix(pf); if (pf->msix > 0) pci_release_msi(dev); memrid = PCIR_BAR(IXL_MSIX_BAR); if (pf->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, memrid, pf->msix_mem); if (pf->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), pf->pci_mem); return; } void ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types) { /* Display supported media types */ if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL); + if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T)) + ifmedia_add(&vsi->media, IFM_ETHER | IFM_2500_T, 0, NULL); + + if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T)) + ifmedia_add(&vsi->media, IFM_ETHER | IFM_5000_T, 0, NULL); + if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) || phy_types & (I40E_CAP_PHY_TYPE_XFI) || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) || phy_types & (I40E_CAP_PHY_TYPE_XLAUI) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) 
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ int ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ifnet *ifp; struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; struct i40e_aq_get_phy_abilities_resp abilities; enum i40e_status_code aq_error = 0; INIT_DEBUGOUT("ixl_setup_interface: begin"); ifp = vsi->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); - return (-1); + return (ENOMEM); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_init = ixl_init; ifp->if_softc = vsi; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixl_ioctl; #if __FreeBSD_version >= 1100036 if_setgetcounterfn(ifp, ixl_get_counter); #endif ifp->if_transmit = ixl_mq_start; ifp->if_qflush = ixl_qflush; ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; /* Set TSO limits */ ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN); ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS; ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE; /* * Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; ifp->if_capabilities |= IFCAP_TSO; ifp->if_capabilities |= IFCAP_JUMBO_MTU; ifp->if_capabilities |= IFCAP_LRO; /* VLAN capabilties */ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; ifp->if_capenable = ifp->if_capabilities; /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. 
lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the ixl driver you can ** enable this and get full hardware tag filtering. */ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change, ixl_media_status); - aq_error = i40e_aq_get_phy_capabilities(hw, - FALSE, TRUE, &abilities, NULL); - /* May need delay to detect fiber correctly */ - if (aq_error == I40E_ERR_UNKNOWN_PHY) { - i40e_msec_delay(200); - aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, - TRUE, &abilities, NULL); - } - if (aq_error) { - if (aq_error == I40E_ERR_UNKNOWN_PHY) - device_printf(dev, "Unknown PHY type detected!\n"); - else - device_printf(dev, - "Error getting supported media types, err %d," - " AQ error %d\n", aq_error, hw->aq.asq_last_status); - } else { - pf->supported_speeds = abilities.link_speed; + if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) == 0) { + aq_error = i40e_aq_get_phy_capabilities(hw, + FALSE, TRUE, &abilities, NULL); + /* May need delay to detect fiber correctly */ + if (aq_error == I40E_ERR_UNKNOWN_PHY) { + i40e_msec_delay(200); + aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, + TRUE, &abilities, NULL); + } + if (aq_error) { + if (aq_error == I40E_ERR_UNKNOWN_PHY) + device_printf(dev, "Unknown PHY type detected!\n"); + else + device_printf(dev, + "Error getting supported media types, err %d," + " AQ error %d\n", aq_error, hw->aq.asq_last_status); + } else { + pf->supported_speeds = abilities.link_speed; #if __FreeBSD_version >= 1100000 - ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds); + ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds); #else - if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds)); + if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds)); #endif - ixl_add_ifmedia(vsi, hw->phy.phy_types); + ixl_add_ifmedia(vsi, hw->phy.phy_types); + } } /* Use autoselect media by default */ ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO); ether_ifattach(ifp, hw->mac.addr); return (0); } /* ** Run when the Admin Queue gets a link state change interrupt. */ void ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; /* Request link status from adapter */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); /* Print out message if an unqualified module is found */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (pf->advertised_speed) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP))) device_printf(dev, "Link failed because " "an unqualified module was detected!\n"); /* Update OS link info */ ixl_update_link_status(pf); } /********************************************************************* * * Get Firmware Switch configuration * - this will need to be more robust when more complex * switch configurations are enabled. 
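*
* With the single default VSI the firmware reply is effectively a
* one-element array, so the code below just lifts seid, uplink_seid
* and downlink_seid out of element[0]; a more complex switch would
* have to walk all header.num_reported elements instead.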
* **********************************************************************/ int ixl_switch_config(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = vsi->dev; struct i40e_aqc_get_switch_config_resp *sw_config; u8 aq_buf[I40E_AQ_LARGE_BUF]; int ret; u16 next = 0; memset(&aq_buf, 0, sizeof(aq_buf)); sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; ret = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (ret) { device_printf(dev, "aq_get_switch_config() failed, error %d," " aq_error %d\n", ret, pf->hw.aq.asq_last_status); return (ret); } if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) { device_printf(dev, "Switch config: header reported: %d in structure, %d total\n", sw_config->header.num_reported, sw_config->header.num_total); for (int i = 0; i < sw_config->header.num_reported; i++) { device_printf(dev, "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i, sw_config->element[i].element_type, sw_config->element[i].seid, sw_config->element[i].uplink_seid, sw_config->element[i].downlink_seid); } } /* Simplified due to a single VSI */ vsi->uplink_seid = sw_config->element[0].uplink_seid; vsi->downlink_seid = sw_config->element[0].downlink_seid; vsi->seid = sw_config->element[0].seid; return (ret); } /********************************************************************* * * Initialize the VSI: this handles contexts, which means things * like the number of descriptors, buffer size, * plus we init the rings thru this function. * **********************************************************************/ int ixl_initialize_vsi(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; struct i40e_hw *hw = vsi->hw; struct i40e_vsi_context ctxt; int tc_queues; int err = 0; memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = vsi->seid; if (pf->veb_seid != 0) ctxt.uplink_seid = pf->veb_seid; ctxt.pf_num = hw->pf_id; err = i40e_aq_get_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d" " aq_error %d\n", err, hw->aq.asq_last_status); return (err); } ixl_dbg(pf, IXL_DBG_SWITCH_INFO, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, " "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, " "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid, ctxt.uplink_seid, ctxt.vsi_number, ctxt.vsis_allocated, ctxt.vsis_unallocated, ctxt.flags, ctxt.pf_num, ctxt.vf_num, ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits); /* ** Set the queue and traffic class bits ** - when multiple traffic classes are supported ** this will need to be more robust. */ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG; /* In contig mode, que_mapping[0] is first queue index used by this VSI */ ctxt.info.queue_mapping[0] = 0; /* * This VSI will only use traffic class 0; start traffic class 0's * queue allocation at queue 0, and assign it 2^tc_queues queues (though * the driver may not use all of them). 
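*
* bsrl() returns the index of the highest set bit, so with, say, 8
* queues allocated to the PF, tc_queues = bsrl(8) = 3 and TC0 is sized
* for 2^3 = 8 queues; a non-power-of-two count such as 6 rounds down
* to 2^2 = 4.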
*/ tc_queues = bsrl(pf->qtag.num_allocated); ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) | ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) & I40E_AQ_VSI_TC_QUE_NUMBER_MASK); /* Set VLAN receive stripping mode */ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID; ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL; if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING) ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; else ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING; #ifdef IXL_IW /* Set TCP Enable for iWARP capable VSI */ if (ixl_enable_iwarp && pf->iw_enabled) { ctxt.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; } #endif /* Save VSI number and info for use later */ vsi->vsi_num = ctxt.vsi_number; bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info)); /* Reset VSI statistics */ ixl_vsi_reset_stats(vsi); - vsi->hw_filters_add = 0; - vsi->hw_filters_del = 0; ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF); err = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d," " aq_error %d\n", err, hw->aq.asq_last_status); return (err); } for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; struct i40e_hmc_obj_txq tctx; struct i40e_hmc_obj_rxq rctx; u32 txctl; u16 size; /* Setup the HMC TX Context */ size = que->num_tx_desc * sizeof(struct i40e_tx_desc); bzero(&tctx, sizeof(tctx)); tctx.new_context = 1; tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS); tctx.qlen = que->num_tx_desc; tctx.fc_ena = 0; /* Disable FCoE */ /* * This value needs to pulled from the VSI that this queue * is assigned to. Index into array is traffic class. */ tctx.rdylist = vsi->info.qs_handle[0]; /* * Set these to enable Head Writeback * - Address is last entry in TX ring (reserved for HWB index) * Leave these as 0 for Descriptor Writeback */ if (vsi->enable_head_writeback) { tctx.head_wb_ena = 1; tctx.head_wb_addr = txr->dma.pa + (que->num_tx_desc * sizeof(struct i40e_tx_desc)); } tctx.rdylist_act = 0; err = i40e_clear_lan_tx_queue_context(hw, i); if (err) { device_printf(dev, "Unable to clear TX context\n"); break; } err = i40e_set_lan_tx_queue_context(hw, i, &tctx); if (err) { device_printf(dev, "Unable to set TX context\n"); break; } /* Associate the ring with this PF */ txctl = I40E_QTX_CTL_PF_QUEUE; txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(i), txctl); ixl_flush(hw); /* Do ring (re)init */ ixl_init_tx_ring(que); /* Next setup the HMC RX Context */ if (vsi->max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len; /* Set up an RX context for the HMC */ memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq)); rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; /* ignore header split for now */ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? 
vsi->max_frame_size : max_rxmax; rctx.dtype = 0; rctx.dsize = 1; /* do 32byte descriptors */ rctx.hsplit_0 = 0; /* no header split */ rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS); rctx.qlen = que->num_rx_desc; rctx.tphrdesc_ena = 1; rctx.tphwdesc_ena = 1; rctx.tphdata_ena = 0; /* Header Split related */ rctx.tphhead_ena = 0; /* Header Split related */ rctx.lrxqthresh = 2; /* Interrupt at <128 desc avail */ rctx.crcstrip = 1; rctx.l2tsel = 1; rctx.showiv = 1; /* Strip inner VLAN header */ rctx.fc_ena = 0; /* Disable FCoE */ rctx.prefena = 1; /* Prefetch descriptors */ err = i40e_clear_lan_rx_queue_context(hw, i); if (err) { device_printf(dev, "Unable to clear RX context %d\n", i); break; } err = i40e_set_lan_rx_queue_context(hw, i, &rctx); if (err) { device_printf(dev, "Unable to set RX context %d\n", i); break; } err = ixl_init_rx_ring(que); if (err) { device_printf(dev, "Fail in init_rx_ring %d\n", i); break; } #ifdef DEV_NETMAP /* preserve queue */ if (vsi->ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(vsi->ifp); struct netmap_kring *kring = na->rx_rings[i]; int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); wr32(vsi->hw, I40E_QRX_TAIL(que->me), t); } else #endif /* DEV_NETMAP */ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1); } return (err); } - - - void ixl_vsi_free_queues(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ixl_queue *que = vsi->queues; if (NULL == vsi->queues) return; for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; if (!mtx_initialized(&txr->mtx)) /* uninitialized */ continue; IXL_TX_LOCK(txr); if (txr->br) buf_ring_free(txr->br, M_DEVBUF); ixl_free_que_tx(que); if (txr->base) i40e_free_dma_mem(&pf->hw, &txr->dma); IXL_TX_UNLOCK(txr); IXL_TX_LOCK_DESTROY(txr); if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ continue; IXL_RX_LOCK(rxr); ixl_free_que_rx(que); if (rxr->base) i40e_free_dma_mem(&pf->hw, &rxr->dma); IXL_RX_UNLOCK(rxr); IXL_RX_LOCK_DESTROY(rxr); } + + sysctl_ctx_free(&vsi->sysctl_ctx); } /********************************************************************* * * Free all VSI structs. 
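*
* Teardown mirrors setup in reverse (the calls below, in order):
*
*   ixl_vsi_free_queues(vsi);     // rings, buf_rings, DMA and queue locks
*   free(vsi->queues, M_DEVBUF);  // then the queue array itself
*   ixl_free_mac_filters(vsi);    // then the soft MAC/VLAN filter list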
* **********************************************************************/ void ixl_free_vsi(struct ixl_vsi *vsi) { - /* Free station queues */ ixl_vsi_free_queues(vsi); if (vsi->queues) free(vsi->queues, M_DEVBUF); /* Free VSI filter list */ ixl_free_mac_filters(vsi); } void ixl_free_mac_filters(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; while (!SLIST_EMPTY(&vsi->ftl)) { f = SLIST_FIRST(&vsi->ftl); SLIST_REMOVE_HEAD(&vsi->ftl, next); free(f, M_DEVBUF); } + + vsi->num_hw_filters = 0; } /* * Fill out fields in queue struct and setup tx/rx memory and structs */ static int ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; int error = 0; int rsize, tsize; que->num_tx_desc = vsi->num_tx_desc; que->num_rx_desc = vsi->num_rx_desc; que->me = index; que->vsi = vsi; txr->que = que; txr->tail = I40E_QTX_TAIL(que->me); /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), que->me); mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); /* * Create the TX descriptor ring * * In Head Writeback mode, the descriptor ring is one bigger * than the number of descriptors for space for the HW to * write back index of last completed descriptor. */ if (vsi->enable_head_writeback) { tsize = roundup2((que->num_tx_desc * sizeof(struct i40e_tx_desc)) + sizeof(u32), DBA_ALIGN); } else { tsize = roundup2((que->num_tx_desc * sizeof(struct i40e_tx_desc)), DBA_ALIGN); } if (i40e_allocate_dma_mem(hw, &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_destroy_tx_mtx; } txr->base = (struct i40e_tx_desc *)txr->dma.va; bzero((void *)txr->base, tsize); /* Now allocate transmit soft structs for the ring */ if (ixl_allocate_tx_data(que)) { device_printf(dev, "Critical Failure setting up TX structures\n"); error = ENOMEM; goto err_free_tx_dma; } /* Allocate a buf ring */ txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF, M_NOWAIT, &txr->mtx); if (txr->br == NULL) { device_printf(dev, "Critical Failure setting up TX buf ring\n"); error = ENOMEM; goto err_free_tx_data; } rsize = roundup2(que->num_rx_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); rxr->que = que; rxr->tail = I40E_QRX_TAIL(que->me); /* Initialize the RX side lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), que->me); mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); if (i40e_allocate_dma_mem(hw, &rxr->dma, i40e_mem_reserved, rsize, 4096)) { device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; goto err_destroy_rx_mtx; } rxr->base = (union i40e_rx_desc *)rxr->dma.va; bzero((void *)rxr->base, rsize); /* Allocate receive soft structs for the ring*/ if (ixl_allocate_rx_data(que)) { device_printf(dev, "Critical Failure setting up receive structs\n"); error = ENOMEM; goto err_free_rx_dma; } return (0); err_free_rx_dma: i40e_free_dma_mem(&pf->hw, &rxr->dma); err_destroy_rx_mtx: mtx_destroy(&rxr->mtx); /* err_free_tx_buf_ring */ buf_ring_free(txr->br, M_DEVBUF); err_free_tx_data: ixl_free_que_tx(que); err_free_tx_dma: i40e_free_dma_mem(&pf->hw, &txr->dma); err_destroy_tx_mtx: mtx_destroy(&txr->mtx); return (error); } int ixl_vsi_setup_queues(struct ixl_vsi *vsi) { struct ixl_queue *que; int error = 0; for (int i = 0; i < vsi->num_queues; i++) { que = 
&vsi->queues[i];
		error = ixl_vsi_setup_queue(vsi, que, i);
		if (error)
			break;
	}
+	if (error == 0)
+		sysctl_ctx_init(&vsi->sysctl_ctx);
+
	return (error);
}

/*********************************************************************
 *
 *  Allocate memory for the VSI (virtual station interface) and their
 *  associated queues, rings and the descriptors associated with each,
 *  called only once at attach.
 *
 **********************************************************************/
int
ixl_setup_stations(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct ixl_vsi *vsi;
	int error = 0;

	vsi = &pf->vsi;
	vsi->back = (void *)pf;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->back = pf;

	if (pf->msix > 1)
		vsi->flags |= IXL_FLAGS_USES_MSIX;

	/* Get memory for the station queues */
	if (!(vsi->queues = (struct ixl_queue *)
	    malloc(sizeof(struct ixl_queue) * vsi->num_queues,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto ixl_setup_stations_err;
	}

	/* Then setup each queue */
	error = ixl_vsi_setup_queues(vsi);

ixl_setup_stations_err:
	return (error);
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
void
ixl_set_queue_rx_itr(struct ixl_queue *que)
{
	struct ixl_vsi *vsi = que->vsi;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = vsi->hw;
	struct rx_ring *rxr = &que->rxr;
	u16 rx_itr;
	u16 rx_latency = 0;
	int rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
	return;
}

/*
** Provide an update to the queue TX
** interrupt moderation value.
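**
** Both the RX and TX paths converge on the computed target with the
** same exponential smoothing step:
**
**   new_itr = (10 * target * old_itr) / (9 * target + old_itr)
**
** e.g. old_itr = 100 and target = 20 gives 20000 / 280 ~= 71, so the
** ITR walks toward the target over several ticks rather than jumping,
** and is stable once old_itr == target (the expression then reduces
** to target).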
*/ void ixl_set_queue_tx_itr(struct ixl_queue *que) { struct ixl_vsi *vsi = que->vsi; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; int tx_bytes; /* Idle, do nothing */ if (txr->bytes == 0) return; if (pf->dynamic_tx_itr) { tx_bytes = txr->bytes/txr->itr; tx_itr = txr->itr; switch (txr->latency) { case IXL_LOW_LATENCY: if (tx_bytes > 10) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (tx_bytes > 20) { tx_latency = IXL_BULK_LATENCY; tx_itr = IXL_ITR_8K; } else if (tx_bytes <= 10) { tx_latency = IXL_LOW_LATENCY; tx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (tx_bytes <= 20) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; } txr->latency = tx_latency; if (tx_itr != txr->itr) { /* do an exponential smoothing */ tx_itr = (10 * tx_itr * txr->itr) / ((9 * tx_itr) + txr->itr); txr->itr = min(tx_itr, IXL_MAX_ITR); wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) vsi->tx_itr_setting = pf->tx_itr; /* Update the hardware if needed */ if (txr->itr != vsi->tx_itr_setting) { txr->itr = vsi->tx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } } txr->bytes = 0; txr->packets = 0; return; } void -ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, - struct sysctl_ctx_list *ctx, const char *sysctl_name) +ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls) { struct sysctl_oid *tree; struct sysctl_oid_list *child; struct sysctl_oid_list *vsi_list; - tree = device_get_sysctl_tree(pf->dev); + tree = device_get_sysctl_tree(vsi->dev); child = SYSCTL_CHILDREN(tree); - vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name, - CTLFLAG_RD, NULL, "VSI Number"); + vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name, + CTLFLAG_RD, NULL, "VSI Number"); + vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); + ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats); - ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats); + if (queues_sysctls) + ixl_vsi_add_queues_stats(vsi); } -#ifdef IXL_DEBUG -/** - * ixl_sysctl_qtx_tail_handler - * Retrieves I40E_QTX_TAIL value from hardware - * for a sysctl. - */ -static int -ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS) -{ - struct ixl_queue *que; - int error; - u32 val; - - que = ((struct ixl_queue *)oidp->oid_arg1); - if (!que) return 0; - - val = rd32(que->vsi->hw, que->txr.tail); - error = sysctl_handle_int(oidp, &val, 0, req); - if (error || !req->newptr) - return error; - return (0); -} - -/** - * ixl_sysctl_qrx_tail_handler - * Retrieves I40E_QRX_TAIL value from hardware - * for a sysctl. - */ -static int -ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS) -{ - struct ixl_queue *que; - int error; - u32 val; - - que = ((struct ixl_queue *)oidp->oid_arg1); - if (!que) return 0; - - val = rd32(que->vsi->hw, que->rxr.tail); - error = sysctl_handle_int(oidp, &val, 0, req); - if (error || !req->newptr) - return error; - return (0); -} -#endif - /* * Used to set the Tx ITR value for all of the PF LAN VSI's queues. * Writes to the ITR registers immediately. 
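*
* The handler follows the usual FreeBSD sysctl read/modify/write shape
* (sketch):
*
*   val = pf->tx_itr;
*   error = sysctl_handle_int(oidp, &val, 0, req);
*   if (error || req->newptr == NULL)
*           return (error);       // error, or just a read-only query
*   ...validate val, store it, then reprogram the ITR registers...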
*/ static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int error = 0; int requested_tx_itr; requested_tx_itr = pf->tx_itr; error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->dynamic_tx_itr) { device_printf(dev, "Cannot set TX itr value while dynamic TX itr is enabled\n"); return (EINVAL); } if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid TX itr value; value must be between 0 and %d\n", IXL_MAX_ITR); return (EINVAL); } pf->tx_itr = requested_tx_itr; ixl_configure_tx_itr(pf); return (error); } /* * Used to set the Rx ITR value for all of the PF LAN VSI's queues. * Writes to the ITR registers immediately. */ static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int error = 0; int requested_rx_itr; requested_rx_itr = pf->rx_itr; error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->dynamic_rx_itr) { device_printf(dev, "Cannot set RX itr value while dynamic RX itr is enabled\n"); return (EINVAL); } if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid RX itr value; value must be between 0 and %d\n", IXL_MAX_ITR); return (EINVAL); } pf->rx_itr = requested_rx_itr; ixl_configure_rx_itr(pf); return (error); } void ixl_add_hw_stats(struct ixl_pf *pf) { device_t dev = pf->dev; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *queues = vsi->queues; struct i40e_hw_port_stats *pf_stats = &pf->stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); - struct sysctl_oid_list *vsi_list; - struct sysctl_oid *queue_node; - struct sysctl_oid_list *queue_list; - - struct tx_ring *txr; - struct rx_ring *rxr; - char queue_namebuf[QUEUE_NAME_LEN]; - /* Driver statistics */ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &pf->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq", CTLFLAG_RD, &pf->admin_irq, "Admin Queue IRQ Handled"); - ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf"); - vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node); - - /* Queue statistics */ - for (int q = 0; q < vsi->num_queues; q++) { - snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); - queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, - OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #"); - queue_list = SYSCTL_CHILDREN(queue_node); - - txr = &(queues[q].txr); - rxr = &(queues[q].rxr); - - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", - CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), - "m_defrag() failed"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", - CTLFLAG_RD, &(queues[q].irqs), - "irqs on this queue"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", - CTLFLAG_RD, &(queues[q].tso), - "TSO"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed", - CTLFLAG_RD, &(queues[q].tx_dmamap_failed), - "Driver tx dma failure in xmit"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small", - CTLFLAG_RD, &(queues[q].mss_too_small), - "TSO sends with an MSS less than 64"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", - CTLFLAG_RD, &(txr->no_desc), - "Queue No Descriptor Available"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", - CTLFLAG_RD, 
&(txr->total_packets), - "Queue Packets Transmitted"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", - CTLFLAG_RD, &(txr->tx_bytes), - "Queue Bytes Transmitted"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", - CTLFLAG_RD, &(rxr->rx_packets), - "Queue Packets Received"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", - CTLFLAG_RD, &(rxr->rx_bytes), - "Queue Bytes Received"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err", - CTLFLAG_RD, &(rxr->desc_errs), - "Queue Rx Descriptor Errors"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", - CTLFLAG_RD, &(rxr->itr), 0, - "Queue Rx ITR Interval"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", - CTLFLAG_RD, &(txr->itr), 0, - "Queue Tx ITR Interval"); -#ifdef IXL_DEBUG - SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog", - CTLFLAG_RD, &(txr->watchdog_timer), 0, - "Ticks before watchdog timer causes interface reinit"); - SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail", - CTLFLAG_RD, &(txr->next_avail), 0, - "Next TX descriptor to be used"); - SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean", - CTLFLAG_RD, &(txr->next_to_clean), 0, - "Next TX descriptor to be cleaned"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done", - CTLFLAG_RD, &(rxr->not_done), - "Queue Rx Descriptors not Done"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh", - CTLFLAG_RD, &(rxr->next_refresh), 0, - "Queue Rx Descriptors not Done"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check", - CTLFLAG_RD, &(rxr->next_check), 0, - "Queue Rx Descriptors not Done"); - SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail", - CTLTYPE_UINT | CTLFLAG_RD, &queues[q], - sizeof(struct ixl_queue), - ixl_sysctl_qrx_tail_handler, "IU", - "Queue Receive Descriptor Tail"); - SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail", - CTLTYPE_UINT | CTLFLAG_RD, &queues[q], - sizeof(struct ixl_queue), - ixl_sysctl_qtx_tail_handler, "IU", - "Queue Transmit Descriptor Tail"); -#endif - } - + ixl_vsi_add_sysctls(&pf->vsi, "pf", true); /* MAC stats */ ixl_add_sysctls_mac_stats(ctx, child, pf_stats); } void -ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, - struct sysctl_oid_list *child, - struct i40e_eth_stats *eth_stats) -{ - struct ixl_sysctl_info ctls[] = - { - {ð_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, - {ð_stats->rx_unicast, "ucast_pkts_rcvd", - "Unicast Packets Received"}, - {ð_stats->rx_multicast, "mcast_pkts_rcvd", - "Multicast Packets Received"}, - {ð_stats->rx_broadcast, "bcast_pkts_rcvd", - "Broadcast Packets Received"}, - {ð_stats->rx_discards, "rx_discards", "Discarded RX packets"}, - {ð_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"}, - {ð_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, - {ð_stats->tx_multicast, "mcast_pkts_txd", - "Multicast Packets Transmitted"}, - {ð_stats->tx_broadcast, "bcast_pkts_txd", - "Broadcast Packets Transmitted"}, - // end - {0,0,0} - }; - - struct ixl_sysctl_info *entry = ctls; - while (entry->stat != 0) - { - SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, - CTLFLAG_RD, entry->stat, - entry->description); - entry++; - } -} - -void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_hw_port_stats *stats) { struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", CTLFLAG_RD, NULL, "Mac Statistics"); struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); struct i40e_eth_stats *eth_stats = 
&stats->eth; ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); struct ixl_sysctl_info ctls[] = { {&stats->crc_errors, "crc_errors", "CRC Errors"}, {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, /* Packet Reception Stats */ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, /* Packet Transmission Stats */ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, /* Flow control */ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, /* End */ {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } } void ixl_set_rss_key(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; enum i40e_status_code status; #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #else ixl_get_default_rss_key(rss_seed); #endif /* Fill out hash function seed */ if (hw->mac.type == I40E_MAC_X722) { struct i40e_aqc_get_set_rss_key_data key_data; bcopy(rss_seed, &key_data, 52); status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); if (status) device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); } } /* * Configure enabled PCTYPES for RSS. 
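*
* Each enabled packet classifier type contributes one bit to a 64-bit
* hash-enable (HENA) mask, e.g.:
*
*   set_hena |= (u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
*
* which is merged with the current value and written back split across
* the two 32-bit I40E_PFQF_HENA registers (low word to HENA(0), high
* word to HENA(1)).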
*/ void ixl_set_rss_pctypes(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u64 set_hena = 0, hena; #ifdef RSS u32 rss_hash_config; rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); #else if (hw->mac.type == I40E_MAC_X722) set_hena = IXL_DEFAULT_RSS_HENA_X722; else set_hena = IXL_DEFAULT_RSS_HENA_XL710; #endif hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); hena |= set_hena; i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); } void ixl_set_rss_hlut(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; int i, que_id; int lut_entry_width; u32 lut = 0; enum i40e_status_code status; lut_entry_width = pf->hw.func_caps.rss_table_entry_width; /* Populate the LUT with max no. of queues in round robin fashion */ u8 hlut_buf[512]; for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) { #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) */ que_id = rss_get_indirection_to_bucket(i); que_id = que_id % vsi->num_queues; #else que_id = i % vsi->num_queues; #endif lut = (que_id & ((0x1 << lut_entry_width) - 1)); hlut_buf[i] = lut; } if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf)); if (status) device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++) wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]); ixl_flush(hw); } } /* ** Setup the PF's RSS parameters. */ void ixl_config_rss(struct ixl_pf *pf) { ixl_set_rss_key(pf); ixl_set_rss_pctypes(pf); ixl_set_rss_hlut(pf); } /* ** This routine is run via an vlan config EVENT, ** it enables us to use the HW Filter table since ** we can get the vlan id. This just creates the ** entry in the soft version of the VFTA, init will ** repopulate the real table. */ void ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = ifp->if_softc; struct i40e_hw *hw = vsi->hw; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IXL_PF_LOCK(pf); ++vsi->num_vlans; ixl_add_filter(vsi, hw->mac.addr, vtag); IXL_PF_UNLOCK(pf); } /* ** This routine is run via an vlan ** unconfig EVENT, remove our entry ** in the soft vfta. 
*/ void ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = ifp->if_softc; struct i40e_hw *hw = vsi->hw; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IXL_PF_LOCK(pf); --vsi->num_vlans; ixl_del_filter(vsi, hw->mac.addr, vtag); IXL_PF_UNLOCK(pf); } /* -** This routine updates vlan filters, called by init -** it scans the filter table and then updates the hw -** after a soft reset. -*/ + * In some firmware versions there is default MAC/VLAN filter + * configured which interferes with filters managed by driver. + * Make sure it's removed. + */ void -ixl_setup_vlan_filters(struct ixl_vsi *vsi) +ixl_del_default_hw_filters(struct ixl_vsi *vsi) { - struct ixl_mac_filter *f; - int cnt = 0, flags; + struct i40e_aqc_remove_macvlan_element_data e; - if (vsi->num_vlans == 0) - return; - /* - ** Scan the filter list for vlan entries, - ** mark them for addition and then call - ** for the AQ update. - */ - SLIST_FOREACH(f, &vsi->ftl, next) { - if (f->flags & IXL_FILTER_VLAN) { - f->flags |= - (IXL_FILTER_ADD | - IXL_FILTER_USED); - cnt++; - } - } - if (cnt == 0) { - printf("setup vlan: no filters found!\n"); - return; - } - flags = IXL_FILTER_VLAN; - flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); - ixl_add_hw_filters(vsi, flags, cnt); - return; + bzero(&e, sizeof(e)); + bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); + e.vlan_tag = 0; + e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); + + bzero(&e, sizeof(e)); + bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); + e.vlan_tag = 0; + e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); } +static enum i40e_status_code +ixl_set_lla(struct ixl_vsi *vsi) +{ + struct i40e_hw *hw = vsi->hw; + u8 tmpaddr[ETHER_ADDR_LEN]; + enum i40e_status_code status; + + status = I40E_SUCCESS; + + bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETHER_ADDR_LEN); + if (memcmp(hw->mac.addr, tmpaddr, ETHER_ADDR_LEN) == 0) + goto set_lla_exit; + + status = i40e_validate_mac_addr(tmpaddr); + if (status != I40E_SUCCESS) + goto set_lla_exit; + + ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); + bcopy(tmpaddr, hw->mac.addr, ETHER_ADDR_LEN); + status = i40e_aq_mac_address_write(hw, + I40E_AQC_WRITE_TYPE_LAA_ONLY, + hw->mac.addr, NULL); + if (status != I40E_SUCCESS) + goto set_lla_exit; + + ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); +set_lla_exit: + return (status); +} + /* ** Initialize filter list and add filters that the hardware ** needs to know about. ** -** Requires VSI's filter list & seid to be set before calling. +** Requires VSI's seid to be set before calling. */ void ixl_init_filters(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; + /* Initialize mac filter list for VSI */ + SLIST_INIT(&vsi->ftl); + vsi->num_hw_filters = 0; + /* Add broadcast address */ ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY); + if (IXL_VSI_IS_VF(vsi)) + return; + + ixl_del_default_hw_filters(vsi); + + ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY); + /* * Prevent Tx flow control frames from being sent out by * non-firmware transmitters. * This affects every VSI in the PF. 
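*
* PAUSE frames use ethertype 0x8808, so the helper called below installs
* an ethertype filter that drops them on the transmit side; roughly (the
* exact flag set lives in the shared i40e code, so treat this as a
* sketch):
*
*   i40e_aq_add_rem_control_packet_filter(hw, NULL, 0x8808,
*       I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
*       I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX,
*       seid, 0, TRUE, NULL, NULL);
*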
*/ if (pf->enable_tx_fc_filter) i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); } /* ** This routine adds multicast filters */ void ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) { struct ixl_mac_filter *f; /* Does one already exist? */ f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (f != NULL) return; f = ixl_get_filter(vsi); if (f == NULL) { printf("WARNING: no filter available!!\n"); return; } bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->vlan = IXL_VLAN_ANY; f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); return; } void ixl_reconfigure_filters(struct ixl_vsi *vsi) { - ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs); + ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_hw_filters); } /* ** This routine adds macvlan filters */ void ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f, *tmp; struct ixl_pf *pf; device_t dev; DEBUGOUT("ixl_add_filter: begin"); pf = vsi->back; dev = pf->dev; /* Does one already exist? */ f = ixl_find_filter(vsi, macaddr, vlan); if (f != NULL) return; /* ** Is this the first vlan being registered? If so, we ** need to remove the ANY filter that indicates we are ** not in a vlan, and replace that with a 0 filter. */ if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (tmp != NULL) { ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); ixl_add_filter(vsi, macaddr, 0); } } f = ixl_get_filter(vsi); if (f == NULL) { device_printf(dev, "WARNING: no filter available!!\n"); return; } bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->vlan = vlan; f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); if (f->vlan != IXL_VLAN_ANY) f->flags |= IXL_FILTER_VLAN; else vsi->num_macs++; ixl_add_hw_filters(vsi, f->flags, 1); return; } void ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; f = ixl_find_filter(vsi, macaddr, vlan); if (f == NULL) return; f->flags |= IXL_FILTER_DEL; ixl_del_hw_filters(vsi, 1); vsi->num_macs--; /* Check if this is the last vlan removal */ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { /* Switch back to a non-vlan filter */ ixl_del_filter(vsi, macaddr, 0); ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); } return; } /* ** Find the filter with both matching mac addr and vlan id */ struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; bool match = FALSE; SLIST_FOREACH(f, &vsi->ftl, next) { if (!cmp_etheraddr(f->macaddr, macaddr)) continue; if (f->vlan == vlan) { match = TRUE; break; } } if (!match) f = NULL; return (f); } /* ** This routine takes additions to the vsi filter ** table and creates an Admin Queue call to create ** the filters in the hardware. */ void ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) { struct i40e_aqc_add_macvlan_element_data *a, *b; struct ixl_mac_filter *f; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; int err, j = 0; pf = vsi->back; dev = pf->dev; hw = &pf->hw; IXL_PF_LOCK_ASSERT(pf); a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (a == NULL) { device_printf(dev, "add_hw_filters failed to get memory\n"); return; } /* ** Scan the filter list; each time we find one ** we add it to the admin queue array and turn off ** the add bit.
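**
** The match test used below is a mask-subset check; a filter is picked
** up only when every requested flag bit is set on it (names from the
** loop that follows):
**
**   if ((f->flags & flags) == flags) {
**       bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
**       f->flags &= ~IXL_FILTER_ADD;
**   }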
*/ SLIST_FOREACH(f, &vsi->ftl, next) { if ((f->flags & flags) == flags) { b = &a[j]; // a pox on fvl long names :) bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); if (f->vlan == IXL_VLAN_ANY) { b->vlan_tag = 0; b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { b->vlan_tag = f->vlan; b->flags = 0; } b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; f->flags &= ~IXL_FILTER_ADD; j++; } if (j == cnt) break; } if (j > 0) { err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); if (err) device_printf(dev, "aq_add_macvlan err %d, " "aq_error %d\n", err, hw->aq.asq_last_status); else - vsi->hw_filters_add += j; + vsi->num_hw_filters += j; } free(a, M_DEVBUF); return; } /* ** This routine takes removals in the vsi filter ** table and creates an Admin Queue call to delete ** the filters in the hardware. */ void ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) { struct i40e_aqc_remove_macvlan_element_data *d, *e; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; struct ixl_mac_filter *f, *f_temp; int err, j = 0; DEBUGOUT("ixl_del_hw_filters: begin\n"); pf = vsi->back; hw = &pf->hw; dev = pf->dev; d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { printf("del hw filter failed to get memory\n"); return; } SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { if (f->flags & IXL_FILTER_DEL) { e = &d[j]; // a pox on fvl long names :) bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; if (f->vlan == IXL_VLAN_ANY) { e->vlan_tag = 0; e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; } else { e->vlan_tag = f->vlan; } /* delete entry from vsi list */ SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); free(f, M_DEVBUF); j++; } if (j == cnt) break; } if (j > 0) { err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) { int sc = 0; for (int i = 0; i < j; i++) sc += (!d[i].error_code); - vsi->hw_filters_del += sc; + vsi->num_hw_filters -= sc; device_printf(dev, "Failed to remove %d/%d filters, aq error %d\n", j - sc, j, hw->aq.asq_last_status); } else - vsi->hw_filters_del += j; + vsi->num_hw_filters -= j; } free(d, M_DEVBUF); DEBUGOUT("ixl_del_hw_filters: end\n"); return; } int ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Enabling PF TX ring %4d / VSI TX ring %4d...\n", pf_qidx, vsi_qidx); i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE); reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); reg |= I40E_QTX_ENA_QENA_REQ_MASK | I40E_QTX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QTX_ENA(pf_qidx), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); if (reg & I40E_QTX_ENA_QENA_STAT_MASK) break; i40e_usec_delay(10); } if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "TX queue %d still disabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Enabling PF RX ring %4d / VSI RX ring %4d...\n", pf_qidx, vsi_qidx); reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); reg |= I40E_QRX_ENA_QENA_REQ_MASK | I40E_QRX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QRX_ENA(pf_qidx), reg); /* Verify the enable took */ for (int j = 0; j < 10; 
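/*
 * This poll loop completes the QENA handshake: the QENA_REQ bit was set
 * above, and hardware acknowledges by latching QENA_STAT, checked up to
 * ten times at 10 us intervals; condensed (retries is illustrative):
 *
 *   reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
 *   reg |= I40E_QRX_ENA_QENA_REQ_MASK;
 *   wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
 *   while (!(rd32(hw, I40E_QRX_ENA(pf_qidx)) &
 *       I40E_QRX_ENA_QENA_STAT_MASK) && retries--)
 *       i40e_usec_delay(10);
 */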
j++) { reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); if (reg & I40E_QRX_ENA_QENA_STAT_MASK) break; i40e_usec_delay(10); } if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "RX queue %d still disabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { int error = 0; error = ixl_enable_tx_ring(pf, qtag, vsi_qidx); /* Called function already prints error message */ if (error) return (error); error = ixl_enable_rx_ring(pf, qtag, vsi_qidx); return (error); } /* For PF VSI only */ int ixl_enable_rings(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; int error = 0; for (int i = 0; i < vsi->num_queues; i++) { error = ixl_enable_ring(pf, &pf->qtag, i); if (error) return (error); } return (error); } /* * Returns error on first ring that is detected hung. */ int ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE); i40e_usec_delay(500); reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QTX_ENA(pf_qidx), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "TX queue %d still enabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } /* * Returns error on first ring that is detected hung. */ int ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_qidx), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "RX queue %d still enabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { int error = 0; error = ixl_disable_tx_ring(pf, qtag, vsi_qidx); /* Called function already prints error message */ if (error) return (error); error = ixl_disable_rx_ring(pf, qtag, vsi_qidx); return (error); } /* For PF VSI only */ int ixl_disable_rings(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; int error = 0; for (int i = 0; i < vsi->num_queues; i++) { error = ixl_disable_ring(pf, &pf->qtag, i); if (error) return (error); } return (error); } /** * ixl_handle_mdd_event * * Called from interrupt handler to identify possibly malicious vfs * (But also detects events from the PF, as well) **/ void ixl_handle_mdd_event(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; bool mdd_detected = false; bool pf_mdd_detected = false; u32 reg; /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; device_printf(dev, "Malicious Driver Detection event %d" " on TX queue %d, pf number %d\n", 
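/*
 * (The queue number printed here, like the other fields, is decoded
 * mask-then-shift from the single I40E_GL_MDET_TX register, e.g.
 *
 *   u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
 *       I40E_GL_MDET_TX_QUEUE_SHIFT;
 *
 * and the register is cleared afterwards by writing all-ones.)
 */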
event, queue, pf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; device_printf(dev, "Malicious Driver Detection event %d" " on RX queue %d, pf number %d\n", event, queue, pf_num); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } if (mdd_detected) { reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); device_printf(dev, "MDD TX event is for this function!\n"); pf_mdd_detected = true; } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); device_printf(dev, "MDD RX event is for this function!\n"); pf_mdd_detected = true; } } /* re-enable mdd interrupt cause */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); ixl_flush(hw); } void ixl_enable_intr(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; if (pf->msix > 1) { for (int i = 0; i < vsi->num_queues; i++, que++) ixl_enable_queue(hw, que->me); } else ixl_enable_intr0(hw); } void ixl_disable_rings_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; for (int i = 0; i < vsi->num_queues; i++, que++) ixl_disable_queue(hw, que->me); } void ixl_enable_intr0(struct i40e_hw *hw) { u32 reg; /* Use IXL_ITR_NONE so ITR isn't updated here */ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, reg); } void ixl_disable_intr0(struct i40e_hw *hw) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTL0, reg); ixl_flush(hw); } void ixl_enable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } void ixl_disable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } void ixl_update_stats_counters(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_vf *vf; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw_port_stats *osd = &pf->stats_offsets; /* Update hw stats */ ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_bytes, &nsd->eth.rx_bytes); ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); ixl_stat_update48(hw, 
I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, &nsd->tx_dropped_link_down); ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, &osd->mac_remote_faults, &nsd->mac_remote_faults); ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); /* Flow control (LFC) stats */ ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); /* Packet size stats rx */ ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); /* Packet size stats tx */ ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); 
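/*
 * Each 48-bit counter above is a low/high register pair;
 * ixl_stat_update48(), defined later in this file, merges them as
 *
 *   u64 v = rd32(hw, loreg);
 *   v |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
 *
 * so only the low 16 bits of the high register are significant.
 */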
ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); + /* EEE */ + i40e_get_phy_lpi_status(hw, nsd); + + ixl_stat_update32(hw, I40E_PRTPM_TLPIC, + pf->stat_offsets_loaded, + &osd->tx_lpi_count, &nsd->tx_lpi_count); + ixl_stat_update32(hw, I40E_PRTPM_RLPIC, + pf->stat_offsets_loaded, + &osd->rx_lpi_count, &nsd->rx_lpi_count); + pf->stat_offsets_loaded = true; /* End hw stats */ /* Update vsi stats */ ixl_update_vsi_stats(vsi); for (int i = 0; i < pf->num_vfs; i++) { vf = &pf->vfs[i]; if (vf->vf_flags & VF_FLAG_ENABLED) ixl_update_eth_stats(&pf->vfs[i].vsi); } } int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; int error = 0; /* Teardown */ if (is_up) ixl_stop(pf); ixl_teardown_queue_msix(vsi); - error = i40e_shutdown_lan_hmc(hw); - if (error) - device_printf(dev, - "Shutdown LAN HMC failed with code %d\n", error); + if (hw->hmc.hmc_obj) { + error = i40e_shutdown_lan_hmc(hw); + if (error) + device_printf(dev, + "Shutdown LAN HMC failed with code %d\n", error); + } + callout_drain(&pf->timer); + ixl_disable_intr0(hw); ixl_teardown_adminq_msix(pf); error = i40e_shutdown_adminq(hw); if (error) device_printf(dev, "Shutdown Admin queue failed with code %d\n", error); - callout_drain(&pf->timer); - /* Free ring buffers, locks and filters */ ixl_vsi_free_queues(vsi); - /* Free VSI filter list */ - ixl_free_mac_filters(vsi); - ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag); return (error); } int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; + enum i40e_get_fw_lldp_status_resp lldp_status; int error = 0; device_printf(dev, "Rebuilding driver state...\n"); - error = i40e_pf_reset(hw); - if (error) { - device_printf(dev, "PF reset failure %s\n", - i40e_stat_str(hw, error)); - goto ixl_rebuild_hw_structs_after_reset_err; + if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) { + if (ixl_fw_recovery_mode(pf)) { + atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE); + pf->link_up = FALSE; + ixl_update_link_status(pf); + } } + /* Setup */ error = i40e_init_adminq(hw); if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "Unable to initialize Admin Queue, error %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } i40e_clear_pxe_mode(hw); error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } + ixl_configure_intr0_msix(pf); + ixl_enable_intr0(hw); - error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, - hw->func_caps.num_rx_qp, 0, 0); - if (error) { - device_printf(dev, "init_lan_hmc failed: %d\n", error); - goto ixl_rebuild_hw_structs_after_reset_err; - } + /* Do not init 
LAN HMC and bring interface up in recovery mode */ + if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) == 0) { + error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, + hw->func_caps.num_rx_qp, 0, 0); + if (error) { + device_printf(dev, "init_lan_hmc failed: %d\n", error); + goto ixl_rebuild_hw_structs_after_reset_err; + } - error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); - if (error) { - device_printf(dev, "configure_lan_hmc failed: %d\n", error); - goto ixl_rebuild_hw_structs_after_reset_err; - } + error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); + if (error) { + device_printf(dev, "configure_lan_hmc failed: %d\n", error); + goto ixl_rebuild_hw_structs_after_reset_err; + } - /* reserve a contiguous allocation for the PF's VSI */ - error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag); - if (error) { - device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", - error); - /* TODO: error handling */ - } + if (!pf->qmgr.qinfo) { + /* Init queue allocation manager */ + error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_rx_qp); + if (error) { + device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", + error); + goto ixl_rebuild_hw_structs_after_reset_err; + } + } + /* reserve a contiguous allocation for the PF's VSI */ + error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag); + if (error) { + device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", + error); + /* TODO: error handling */ + } else + device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", + pf->qtag.num_allocated, pf->qtag.num_active); - device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", - pf->qtag.num_allocated, pf->qtag.num_active); + error = ixl_switch_config(pf); + if (error) { + device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n", + error); + goto ixl_rebuild_hw_structs_after_reset_err; + } + } /* not in recovery mode */ - error = ixl_switch_config(pf); - if (error) { - device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n", - error); - goto ixl_rebuild_hw_structs_after_reset_err; - } + /* Remove default filters reinstalled by FW on reset */ + ixl_del_default_hw_filters(vsi); if (ixl_vsi_setup_queues(vsi)) { device_printf(dev, "setup queues failed!\n"); error = ENOMEM; goto ixl_rebuild_hw_structs_after_reset_err; } + ixl_vsi_add_sysctls(vsi, "pf", true); + if (pf->msix > 1) { error = ixl_setup_adminq_msix(pf); if (error) { device_printf(dev, "ixl_setup_adminq_msix() error: %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); error = ixl_setup_queue_msix(vsi); if (error) { device_printf(dev, "ixl_setup_queue_msix() error: %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } + error = ixl_setup_queue_tqs(vsi); + if (error) { + device_printf(dev, "ixl_setup_queue_tqs() error: %d\n", + error); + goto ixl_rebuild_hw_structs_after_reset_err; + } } else { error = ixl_setup_legacy(pf); if (error) { device_printf(dev, "ixl_setup_legacy() error: %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } } + /* Do not bring interface up in recovery mode */ + if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) + return (error); + /* Determine link state */ if (ixl_attach_get_link_status(pf)) { error = EINVAL; /* TODO: error handling */ } i40e_aq_set_dcb_parameters(hw, TRUE, NULL); - ixl_get_fw_lldp_status(pf); + /* Query 
device FW LLDP status */ + if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) { + if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) { + atomic_set_int(&pf->state, + IXL_PF_STATE_FW_LLDP_DISABLED); + } else { + atomic_clear_int(&pf->state, + IXL_PF_STATE_FW_LLDP_DISABLED); + } + } + if (is_up) ixl_init(pf); device_printf(dev, "Rebuilding driver state done.\n"); + IXL_PF_LOCK(pf); + callout_reset(&pf->timer, hz, ixl_local_timer, pf); + IXL_PF_UNLOCK(pf); return (0); ixl_rebuild_hw_structs_after_reset_err: device_printf(dev, "Reload the driver to recover\n"); return (error); } void ixl_handle_empr_reset(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING); - int count = 0; - u32 reg; + int error = 0; ixl_prepare_for_reset(pf, is_up); - - /* Typically finishes within 3-4 seconds */ - while (count++ < 100) { - reg = rd32(hw, I40E_GLGEN_RSTAT) - & I40E_GLGEN_RSTAT_DEVSTATE_MASK; - if (reg) - i40e_msec_delay(100); - else - break; + /* + * i40e_pf_reset checks the type of reset and acts + * accordingly. If EMP or Core reset was performed + * doing PF reset is not necessary and it sometimes + * fails. + */ + error = i40e_pf_reset(hw); + if (error) { + device_printf(pf->dev, "PF reset failure %s\n", + i40e_stat_str(hw, error)); } - ixl_dbg(pf, IXL_DBG_INFO, - "EMPR reset wait count: %d\n", count); ixl_rebuild_hw_structs_after_reset(pf, is_up); atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); } /* ** Tasklet handler for MSIX Adminq interrupts ** - do outside interrupt since it might sleep */ void ixl_do_adminq(void *context, int pending) { struct ixl_pf *pf = context; struct i40e_hw *hw = &pf->hw; struct i40e_arq_event_info event; i40e_status ret; device_t dev = pf->dev; u32 loop = 0; - u16 opcode, result; + u16 opcode, arq_pending; if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { /* Flag cleared at end of this function */ ixl_handle_empr_reset(pf); return; } /* Admin Queue handling */ event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!event.msg_buf) { device_printf(dev, "%s: Unable to allocate memory for Admin" " Queue event!\n", __func__); return; } IXL_PF_LOCK(pf); /* clean and process any events */ do { - ret = i40e_clean_arq_element(hw, &event, &result); + ret = i40e_clean_arq_element(hw, &event, &arq_pending); if (ret) break; opcode = LE16_TO_CPU(event.desc.opcode); ixl_dbg(pf, IXL_DBG_AQ, "Admin Queue event: %#06x\n", opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: ixl_link_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_pf: #ifdef PCI_IOV ixl_handle_vf_msg(pf, &event); #endif break; case i40e_aqc_opc_event_lan_overflow: default: break; } - } while (result && (loop++ < IXL_ADM_LIMIT)); + } while (arq_pending && (loop++ < IXL_ADM_LIMIT)); free(event.msg_buf, M_DEVBUF); - /* - * If there are still messages to process, reschedule ourselves. - * Otherwise, re-enable our interrupt. - */ - if (result > 0) + /* If there are still messages to process, reschedule. */ + if (arq_pending > 0) taskqueue_enqueue(pf->tq, &pf->adminq); else ixl_enable_intr0(hw); IXL_PF_UNLOCK(pf); } /** * Update VSI-specific ethernet statistics counters. 
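*
* Each VSI reads a hardware stat block selected by its
* info.stat_counter_idx; the first value seen is saved as an offset so
* counters report from zero, with updates following the pattern below
* (idx standing in for the stat_idx used in the code):
*
*   ixl_stat_update48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
*       vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes);
*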
**/ void ixl_update_eth_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *es; struct i40e_eth_stats *oes; - struct i40e_hw_port_stats *nsd; u16 stat_idx = vsi->info.stat_counter_idx; es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; - nsd = &pf->stats; /* Gather up the stats that the hw collects */ ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; } void ixl_update_vsi_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf; struct ifnet *ifp; struct i40e_eth_stats *es; u64 tx_discards; struct i40e_hw_port_stats *nsd; pf = vsi->back; ifp = vsi->ifp; es = &vsi->eth_stats; nsd = &pf->stats; ixl_update_eth_stats(vsi); tx_discards = es->tx_discards + nsd->tx_dropped_link_down; for (int i = 0; i < vsi->num_queues; i++) tx_discards += vsi->queues[i].txr.br->br_drops; /* Update ifnet stats */ IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast + es->rx_broadcast); IXL_SET_OPACKETS(vsi, es->tx_unicast + es->tx_multicast + es->tx_broadcast); IXL_SET_IBYTES(vsi, es->rx_bytes); IXL_SET_OBYTES(vsi, es->tx_bytes); IXL_SET_IMCASTS(vsi, es->rx_multicast); IXL_SET_OMCASTS(vsi, es->tx_multicast); IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + nsd->rx_jabber); IXL_SET_OERRORS(vsi, es->tx_errors); IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); IXL_SET_OQDROPS(vsi, tx_discards); IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); IXL_SET_COLLISIONS(vsi, 0); } /** * Reset all of the stats for the given pf **/ void ixl_pf_reset_stats(struct ixl_pf *pf) { bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); pf->stat_offsets_loaded = false; } /** * Resets all stats of the given vsi **/ void ixl_vsi_reset_stats(struct ixl_vsi *vsi) { bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); vsi->stat_offsets_loaded = false; } /** * Read and update a 48 bit stat from the hw * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. 
We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. **/ void ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) new_data = rd64(hw, loreg); #else /* * Use two rd32's instead of one rd64; FreeBSD versions before * 10 don't support 64-bit bus reads/writes. */ new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; #endif if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = new_data - *offset; else *stat = (new_data + ((u64)1 << 48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } /** * Read and update a 32 bit stat from the hw **/ void ixl_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; new_data = rd32(hw, reg); if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = (u32)(new_data - *offset); else *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); } void ixl_add_device_sysctls(struct ixl_pf *pf) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); struct sysctl_oid *debug_node; struct sysctl_oid_list *debug_list; struct sysctl_oid *fec_node; struct sysctl_oid_list *fec_list; + struct sysctl_oid *eee_node; + struct sysctl_oid_list *eee_list; + /* Set up sysctls */ SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_unallocated_queues, "I", "Queues not allocated to a PF or VF"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_pf_tx_itr, "I", "Immediately set TX ITR value for all queues"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_pf_rx_itr, "I", "Immediately set RX ITR value for all queues"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "tx_ring_size", CTLFLAG_RD, &pf->vsi.num_tx_desc, 0, "TX ring size"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "rx_ring_size", CTLFLAG_RD, &pf->vsi.num_rx_desc, 0, "RX ring size"); /* Add FEC sysctls for 25G adapters */ if (i40e_is_25G_device(hw->device_id)) { fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls"); fec_list = SYSCTL_CHILDREN(fec_node); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW, 
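/*
 * (A note on the 48-bit stat helper above: when the raw counter wraps,
 * the new reading is smaller than the saved offset, so 2^48 is added
 * back before subtracting; e.g. offset = 0xFFFFFFFFFFF0 and a new
 * reading of 0x10 yields (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20.)
 */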
pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes"); } SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); + eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, + OID_AUTO, "eee", CTLFLAG_RD, NULL, + "Energy Efficient Ethernet (EEE) Sysctls"); + eee_list = SYSCTL_CHILDREN(eee_node); + + SYSCTL_ADD_PROC(ctx, eee_list, + OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW, + pf, 0, ixl_sysctl_eee_enable, "I", + "Enable Energy Efficient Ethernet (EEE)"); + + SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", + CTLFLAG_RD, &pf->stats.tx_lpi_status, 0, + "TX LPI status"); + + SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", + CTLFLAG_RD, &pf->stats.rx_lpi_status, 0, + "RX LPI status"); + + SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", + CTLFLAG_RD, &pf->stats.tx_lpi_count, + "TX LPI count"); + + SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", + CTLFLAG_RD, &pf->stats.rx_lpi_count, + "RX LPI count"); /* Add sysctls meant to print debug information, but don't list them * in "sysctl -a" output. */ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls"); debug_list = SYSCTL_CHILDREN(debug_node); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "shared_debug_mask", CTLFLAG_RW, &pf->hw.debug_mask, 0, "Shared code debug message level"); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "core_debug_mask", CTLFLAG_RW, &pf->dbg_mask, 0, "Non-shared code debug message level"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD, pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR, pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); if (pf->has_i2c) { SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus"); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD, + pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); } #ifdef PCI_IOV SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl, 0, "PF/VF Virtual Channel debug level"); #endif } /* * Primarily for finding out how many queues can be assigned to VFs, * at runtime. */ static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int queues; IXL_PF_LOCK(pf); queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); IXL_PF_UNLOCK(pf); return sysctl_handle_int(oidp, NULL, queues, req); } /* ** Set flow control using sysctl: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_fc, error = 0; enum i40e_status_code aq_error = 0; u8 fc_aq_err = 0; /* Get request */ requested_fc = pf->fc; error = sysctl_handle_int(oidp, &requested_fc, 0, req); if ((error) || (req->newptr == NULL)) return (error); + if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) { + device_printf(dev, "Interface is currently in FW recovery mode. " + "Setting flow control not supported\n"); + return (EINVAL); + } if (requested_fc < 0 || requested_fc > 3) { device_printf(dev, "Invalid fc mode; valid modes are 0 through 3\n"); return (EINVAL); } /* Set fc ability for port */ hw->fc.requested_mode = requested_fc; aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE); if (aq_error) { device_printf(dev, "%s: Error setting new fc mode %d; fc_err %#x\n", __func__, aq_error, fc_aq_err); return (EIO); } pf->fc = requested_fc; /* Get new link state */ i40e_msec_delay(250); hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); return (0); } -char * -ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed) -{ - int index; - char *speeds[] = { +static const char * +ixl_link_speed_string(u8 link_speed) +{ + const char * link_speed_str[] = { "Unknown", "100 Mbps", "1 Gbps", "10 Gbps", "40 Gbps", "20 Gbps", "25 Gbps", + "2.5 Gbps", + "5 Gbps" }; + int index; switch (link_speed) { - case I40E_LINK_SPEED_100MB: - index = 1; - break; - case I40E_LINK_SPEED_1GB: - index = 2; - break; - case I40E_LINK_SPEED_10GB: - index = 3; - break; - case I40E_LINK_SPEED_40GB: - index = 4; - break; - case I40E_LINK_SPEED_20GB: - index = 5; - break; - case I40E_LINK_SPEED_25GB: - index = 6; - break; - case I40E_LINK_SPEED_UNKNOWN: - default: - index = 0; - break; + case I40E_LINK_SPEED_100MB: + index = 1; + break; + case I40E_LINK_SPEED_1GB: + index = 2; + break; + case I40E_LINK_SPEED_10GB: + index = 3; + break; + case I40E_LINK_SPEED_40GB: + index = 4; + break; + case I40E_LINK_SPEED_20GB: + index = 5; + break; + case I40E_LINK_SPEED_25GB: + index = 6; + break; + case I40E_LINK_SPEED_2_5GB: + index = 7; + break; + case I40E_LINK_SPEED_5GB: + index = 8; + break; + case I40E_LINK_SPEED_UNKNOWN: + default: + index = 0; + break; } - return speeds[index]; + 
return (link_speed_str[index]); } int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; int error = 0; ixl_update_link_status(pf); error = sysctl_handle_string(oidp, - ixl_aq_speed_to_str(hw->phy.link_info.link_speed), + __DECONST(void *, + ixl_link_speed_string(hw->phy.link_info.link_speed)), 8, req); + return (error); } /* * Converts 8-bit speeds value to and from sysctl flags and * Admin Queue flags. */ static u8 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) { - static u16 speedmap[6] = { +#define SPEED_MAP_SIZE 8 + static u16 speedmap[SPEED_MAP_SIZE] = { (I40E_LINK_SPEED_100MB | (0x1 << 8)), (I40E_LINK_SPEED_1GB | (0x2 << 8)), (I40E_LINK_SPEED_10GB | (0x4 << 8)), (I40E_LINK_SPEED_20GB | (0x8 << 8)), (I40E_LINK_SPEED_25GB | (0x10 << 8)), - (I40E_LINK_SPEED_40GB | (0x20 << 8)) + (I40E_LINK_SPEED_40GB | (0x20 << 8)), + (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), + (I40E_LINK_SPEED_5GB | (0x80 << 8)), }; u8 retval = 0; - for (int i = 0; i < 6; i++) { + for (int i = 0; i < SPEED_MAP_SIZE; i++) { if (to_aq) retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; else retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0; } return (retval); } int ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; enum i40e_status_code aq_error = 0; /* Get current capability information */ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (aq_error) { device_printf(dev, "%s: Error getting phy capabilities %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EIO); } /* Prepare new config */ bzero(&config, sizeof(config)); if (from_aq) config.link_speed = speeds; else config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; - config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e); + config.fec_config = abilities.fec_cfg_curr_mod_ext_info + & I40E_AQ_PHY_FEC_CONFIG_MASK; /* Do aq command & restart link */ aq_error = i40e_aq_set_phy_config(hw, &config, NULL); if (aq_error) { device_printf(dev, "%s: Error setting new phy config %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EIO); } return (0); } /* -** Supported link speedsL +** Supported link speeds ** Flags: ** 0x1 - 100 Mb ** 0x2 - 1G ** 0x4 - 10G ** 0x8 - 20G ** 0x10 - 25G ** 0x20 - 40G +** 0x40 - 2.5G +** 0x80 - 5G */ static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); return sysctl_handle_int(oidp, NULL, supported, req); } /* ** Control link advertise speed: ** Flags: ** 0x1 - advertise 100 Mb ** 0x2 - advertise 1G ** 0x4 - advertise 10G ** 0x8 - advertise 20G ** 0x10 - advertise 25G ** 0x20 - advertise 40G +** 0x40 - advertise 2.5G +** 0x80 - advertise 5G ** ** Set to 0 to disable link */ int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; u8 converted_speeds; int requested_ls = 0; int error = 0; /* Read in new mode */ requested_ls 
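/*
 * (In ixl_convert_sysctl_aq_link_speed() above, each table entry packs
 * the AQ flag in its low byte and the sysctl flag in its high byte, so
 * a single table walk converts either direction; e.g. sysctl flag 0x4
 * maps to I40E_LINK_SPEED_10GB and back.)
 */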
= pf->advertised_speed; error = sysctl_handle_int(oidp, &requested_ls, 0, req); if ((error) || (req->newptr == NULL)) return (error); + if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) { + device_printf(dev, "Interface is currently in FW recovery mode. " + "Setting advertise speed not supported\n"); + return (EINVAL); + } /* Error out if bits outside of possible flag range are set */ - if ((requested_ls & ~((u8)0x3F)) != 0) { + if ((requested_ls & ~((u8)0xFF)) != 0) { device_printf(dev, "Input advertised speed out of range; " "valid flags are: 0x%02x\n", ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); return (EINVAL); } /* Check if adapter supports input value */ converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { device_printf(dev, "Invalid advertised speed; " "valid flags are: 0x%02x\n", ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); return (EINVAL); } error = ixl_set_advertised_speeds(pf, requested_ls, false); if (error) return (error); pf->advertised_speed = requested_ls; ixl_update_link_status(pf); return (0); } /* * Input: bitmap of enum i40e_aq_link_speed */ static u64 ixl_max_aq_speed_to_value(u8 link_speeds) { if (link_speeds & I40E_LINK_SPEED_40GB) return IF_Gbps(40); if (link_speeds & I40E_LINK_SPEED_25GB) return IF_Gbps(25); if (link_speeds & I40E_LINK_SPEED_20GB) return IF_Gbps(20); if (link_speeds & I40E_LINK_SPEED_10GB) return IF_Gbps(10); + if (link_speeds & I40E_LINK_SPEED_5GB) + return IF_Gbps(5); + if (link_speeds & I40E_LINK_SPEED_2_5GB) + return IF_Mbps(2500); if (link_speeds & I40E_LINK_SPEED_1GB) return IF_Gbps(1); if (link_speeds & I40E_LINK_SPEED_100MB) return IF_Mbps(100); else /* Minimum supported link speed */ return IF_Mbps(100); } /* ** Get the width and transaction speed of ** the bus this adapter is plugged into. */ void ixl_get_bus_info(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u16 link; u32 offset, num_ports; u64 max_speed; /* Some devices don't use PCIE */ if (hw->mac.type == I40E_MAC_X722) return; /* Read PCI Express Capabilities Link Status Register */ pci_find_cap(dev, PCIY_EXPRESS, &offset); link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); /* Fill out hw struct with PCIE info */ i40e_set_pci_config_data(hw, link); /* Use info to print out bandwidth messages */ device_printf(dev,"PCI Express Bus: Speed %s %s\n", ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : ("Unknown")); /* * If adapter is in slot with maximum supported speed, * no warning message needs to be printed out. 
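*
* The heuristic below scores the slot as speed (in MT/s per lane, e.g.
* 8000 for PCIe Gen3) times lane count and compares that against the
* summed port rates in Mb/s; for instance two 40 Gb ports want
* 2 * 40000 = 80000, which exceeds a Gen3 x8 score of 8000 * 8 = 64000
* and triggers the warning. It is a coarse bandwidth estimate, not an
* exact throughput model.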
*/ if (hw->bus.speed >= i40e_bus_speed_8000 && hw->bus.width >= i40e_bus_width_pcie_x8) return; num_ports = bitcount32(hw->func_caps.valid_functions); max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { device_printf(dev, "PCI-Express bandwidth available" " for this device may be insufficient for" " optimal performance.\n"); device_printf(dev, "Please move the device to a different" " PCI-e link with more lanes and/or higher" " transfer rate.\n"); } } static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; struct sbuf *sbuf; sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ixl_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } void ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) { if ((nvma->command == I40E_NVM_READ) && ((nvma->config & 0xFF) == 0xF) && (((nvma->config & 0xF00) >> 8) == 0xF) && (nvma->offset == 0) && (nvma->data_size == 1)) { // device_printf(dev, "- Get Driver Status Command\n"); } else if (nvma->command == I40E_NVM_READ) { } else { switch (nvma->command) { case 0xB: device_printf(dev, "- command: I40E_NVM_READ\n"); break; case 0xC: device_printf(dev, "- command: I40E_NVM_WRITE\n"); break; default: device_printf(dev, "- command: unknown 0x%08x\n", nvma->command); break; } device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF); device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8); device_printf(dev, "- offset : 0x%08x\n", nvma->offset); device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size); } } int ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) { struct i40e_hw *hw = &pf->hw; struct i40e_nvm_access *nvma; device_t dev = pf->dev; enum i40e_status_code status = 0; size_t nvma_size, ifd_len, exp_len; int err, perrno; DEBUGFUNC("ixl_handle_nvmupd_cmd"); /* Sanity checks */ nvma_size = sizeof(struct i40e_nvm_access); ifd_len = ifd->ifd_len; if (ifd_len < nvma_size || ifd->ifd_data == NULL) { device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__); device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", __func__, ifd_len, nvma_size); device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data); return (EINVAL); } nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK); err = copyin(ifd->ifd_data, nvma, ifd_len); if (err) { device_printf(dev, "%s: Cannot get request from user space\n", __func__); free(nvma, M_DEVBUF); return (err); } if (pf->dbg_mask & IXL_DBG_NVMUPD) ixl_print_nvm_cmd(dev, nvma); if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { int count = 0; while (count++ < 100) { i40e_msec_delay(100); if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) break; } } if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { free(nvma, M_DEVBUF); return (-EBUSY); } if (nvma->data_size < 1 || nvma->data_size > 4096) { device_printf(dev, "%s: invalid request, data size not in supported range\n", __func__); free(nvma, M_DEVBUF); return (EINVAL); } /* * Older versions of the NVM update tool don't set ifd_len to the size * of the entire buffer passed to the ioctl. Check the data_size field * in the contained i40e_nvm_access struct and ensure everything is * copied in from userspace. 
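*
* Concretely, struct i40e_nvm_access ends in a one-byte data member, so
* a request carrying N payload bytes occupies
*
*   exp_len = sizeof(struct i40e_nvm_access) + N - 1;
*
* and the code below reallocates and re-copies whenever the tool's
* reported ifd_len falls short of that.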
*/ exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ if (ifd_len < exp_len) { ifd_len = exp_len; nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK); err = copyin(ifd->ifd_data, nvma, ifd_len); if (err) { device_printf(dev, "%s: Cannot get request from user space\n", __func__); free(nvma, M_DEVBUF); return (err); } } IXL_PF_LOCK(pf); status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); IXL_PF_UNLOCK(pf); err = copyout(nvma, ifd->ifd_data, ifd_len); free(nvma, M_DEVBUF); if (err) { device_printf(dev, "%s: Cannot return data to user space\n", __func__); return (err); } /* Let the nvmupdate report errors, show them only when debug is enabled */ if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", i40e_stat_str(hw, status), perrno); /* * -EPERM is actually ERESTART, which the kernel interprets as it needing * to run this ioctl again. So use -EACCES for -EPERM instead. */ if (perrno == -EPERM) return (-EACCES); else return (perrno); } +int +ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *pf, struct ifreq *ifr) +{ + struct ifi2creq i2c; + int error = 0; + int i; + + if (pf->read_i2c_byte == NULL) + return (EINVAL); + +#ifdef ifr_data + error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); +#else + error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); +#endif + + if (error != 0) + return (error); + if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { + error = EINVAL; + return (error); + } + if (i2c.len > sizeof(i2c.data)) { + error = EINVAL; + return (error); + } + + for (i = 0; i < i2c.len; ++i) { + if (pf->read_i2c_byte(pf, i2c.offset + i, + i2c.dev_addr, &i2c.data[i])) + return (EIO); + } + +#ifdef ifr_data + error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); +#else + error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); +#endif + + return (error); +} + /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * * When adding new media types here, make sure to add them to * ixl_add_ifmedia(), too. 
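*
* (On the I2C handler just added: 0xA0 and 0xA2 are the standard SFP
* module addresses, the identification EEPROM and the diagnostics page
* respectively; each byte is fetched through the driver hook as
*
*   pf->read_i2c_byte(pf, i2c.offset + i, i2c.dev_addr, &i2c.data[i]);
*
* one offset at a time.)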
* **********************************************************************/ void ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct ixl_vsi *vsi = ifp->if_softc; struct ixl_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; INIT_DEBUGOUT("ixl_media_status: begin"); /* Don't touch PF during reset */ if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING) return; IXL_PF_LOCK(pf); i40e_get_link_status(hw, &pf->link_up); ixl_update_link_status(pf); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!pf->link_up) { IXL_PF_UNLOCK(pf); return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware always does full-duplex */ ifmr->ifm_active |= IFM_FDX; switch (hw->phy.link_info.phy_type) { /* 100 M */ case I40E_PHY_TYPE_100BASE_TX: ifmr->ifm_active |= IFM_100_TX; break; /* 1 G */ case I40E_PHY_TYPE_1000BASE_T: ifmr->ifm_active |= IFM_1000_T; break; case I40E_PHY_TYPE_1000BASE_SX: ifmr->ifm_active |= IFM_1000_SX; break; case I40E_PHY_TYPE_1000BASE_LX: ifmr->ifm_active |= IFM_1000_LX; break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: ifmr->ifm_active |= IFM_1000_T; break; + /* 2.5 G */ + case I40E_PHY_TYPE_2_5GBASE_T: + ifmr->ifm_active |= IFM_2500_T; + break; + /* 5 G */ + case I40E_PHY_TYPE_5GBASE_T: + ifmr->ifm_active |= IFM_5000_T; + break; /* 10 G */ case I40E_PHY_TYPE_10GBASE_SFPP_CU: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_SR: ifmr->ifm_active |= IFM_10G_SR; break; case I40E_PHY_TYPE_10GBASE_LR: ifmr->ifm_active |= IFM_10G_LR; break; case I40E_PHY_TYPE_10GBASE_T: ifmr->ifm_active |= IFM_10G_T; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_AOC: ifmr->ifm_active |= IFM_10G_AOC; break; /* 25 G */ case I40E_PHY_TYPE_25GBASE_KR: ifmr->ifm_active |= IFM_25G_KR; break; case I40E_PHY_TYPE_25GBASE_CR: ifmr->ifm_active |= IFM_25G_CR; break; case I40E_PHY_TYPE_25GBASE_SR: ifmr->ifm_active |= IFM_25G_SR; break; case I40E_PHY_TYPE_25GBASE_LR: ifmr->ifm_active |= IFM_25G_LR; break; case I40E_PHY_TYPE_25GBASE_AOC: ifmr->ifm_active |= IFM_25G_AOC; break; case I40E_PHY_TYPE_25GBASE_ACC: ifmr->ifm_active |= IFM_25G_ACC; break; /* 40 G */ case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: ifmr->ifm_active |= IFM_40G_CR4; break; case I40E_PHY_TYPE_40GBASE_SR4: ifmr->ifm_active |= IFM_40G_SR4; break; case I40E_PHY_TYPE_40GBASE_LR4: ifmr->ifm_active |= IFM_40G_LR4; break; case I40E_PHY_TYPE_XLAUI: ifmr->ifm_active |= IFM_OTHER; break; case I40E_PHY_TYPE_1000BASE_KX: ifmr->ifm_active |= IFM_1000_KX; break; case I40E_PHY_TYPE_SGMII: ifmr->ifm_active |= IFM_1000_SGMII; break; /* ERJ: What's the difference between these? 
*/ case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ifmr->ifm_active |= IFM_10G_CR1; break; case I40E_PHY_TYPE_10GBASE_KX4: ifmr->ifm_active |= IFM_10G_KX4; break; case I40E_PHY_TYPE_10GBASE_KR: ifmr->ifm_active |= IFM_10G_KR; break; case I40E_PHY_TYPE_SFI: ifmr->ifm_active |= IFM_10G_SFI; break; /* Our single 20G media type */ case I40E_PHY_TYPE_20GBASE_KR2: ifmr->ifm_active |= IFM_20G_KR2; break; case I40E_PHY_TYPE_40GBASE_KR4: ifmr->ifm_active |= IFM_40G_KR4; break; case I40E_PHY_TYPE_XLPPI: case I40E_PHY_TYPE_40GBASE_AOC: ifmr->ifm_active |= IFM_40G_XLPPI; break; /* Unknown to driver */ default: ifmr->ifm_active |= IFM_UNKNOWN; break; } /* Report flow control status as well */ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; IXL_PF_UNLOCK(pf); } void ixl_init(void *arg) { struct ixl_pf *pf = arg; IXL_PF_LOCK(pf); ixl_init_locked(pf); IXL_PF_UNLOCK(pf); } /* * NOTE: Fortville does not support forcing media speeds. Instead, * use the set_advertise sysctl to set the speeds Fortville * will advertise or be allowed to operate at. */ int ixl_media_change(struct ifnet * ifp) { struct ixl_vsi *vsi = ifp->if_softc; struct ifmedia *ifm = &vsi->media; INIT_DEBUGOUT("ixl_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n"); return (ENODEV); } /********************************************************************* * Ioctl entry point * * ixl_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ int ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct ixl_vsi *vsi = ifp->if_softc; struct ixl_pf *pf = vsi->back; struct ifreq *ifr = (struct ifreq *)data; struct ifdrv *ifd = (struct ifdrv *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; bool avoid_reset = FALSE; #endif int error = 0; + if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) != 0) { + /* We are in recovery mode supporting only NVM update */ + switch (command) { + case SIOCSDRVSPEC: + case SIOCGDRVSPEC: + IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific " + "Info)\n"); + + /* NVM update command */ + if (ifd->ifd_cmd == I40E_NVM_ACCESS) + error = ixl_handle_nvmupd_cmd(pf, ifd); + else + error = EINVAL; + break; + default: + error = EINVAL; + break; + } + + return (error); + } + switch (command) { case SIOCSIFADDR: IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)"); #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif #if defined(INET) || defined(INET6) /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. 
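The recovery-mode block added to ixl_ioctl() above only admits SIOCSDRVSPEC/SIOCGDRVSPEC with the NVM access command. A hedged sketch of how an update tool would reach that path from userspace; NVM_ACCESS_CMD is a placeholder for the driver's I40E_NVM_ACCESS value, not a real symbol.

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <string.h>

#define NVM_ACCESS_CMD	0	/* placeholder for I40E_NVM_ACCESS */

static int
send_nvm_request(int s, const char *ifname, void *req, size_t req_len)
{
	struct ifdrv ifd;

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = NVM_ACCESS_CMD;
	ifd.ifd_len = req_len;		/* header plus full data payload */
	ifd.ifd_data = req;

	return (ioctl(s, SIOCSDRVSPEC, &ifd));
}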
*/ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ixl_init(pf); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; #endif case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); if (ifr->ifr_mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { error = EINVAL; } else { IXL_PF_LOCK(pf); ifp->if_mtu = ifr->ifr_mtu; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; ixl_init_locked(pf); IXL_PF_UNLOCK(pf); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); IXL_PF_LOCK(pf); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ pf->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { ixl_set_promisc(vsi); } - } else { - IXL_PF_UNLOCK(pf); - ixl_init(pf); - IXL_PF_LOCK(pf); - } - } else { - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - ixl_stop_locked(pf); - } - } + } else + ixl_init_locked(pf); + } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixl_stop_locked(pf); + pf->if_flags = ifp->if_flags; IXL_PF_UNLOCK(pf); break; case SIOCSDRVSPEC: case SIOCGDRVSPEC: IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific " "Info)\n"); /* NVM update command */ if (ifd->ifd_cmd == I40E_NVM_ACCESS) error = ixl_handle_nvmupd_cmd(pf, ifd); else error = EINVAL; break; case SIOCADDMULTI: IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_disable_rings_intr(vsi); ixl_add_multi(vsi); ixl_enable_intr(vsi); IXL_PF_UNLOCK(pf); } break; case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_disable_rings_intr(vsi); ixl_del_multi(vsi); ixl_enable_intr(vsi); IXL_PF_UNLOCK(pf); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &vsi->media, command); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); ixl_cap_txcsum_tso(vsi, ifp, mask); if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_init_locked(pf); IXL_PF_UNLOCK(pf); } VLAN_CAPABILITIES(ifp); break; } #if __FreeBSD_version >= 1003000 case SIOCGI2C: { - struct ifi2creq i2c; - int i; - IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); if (!pf->has_i2c) return (ENOTTY); - error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); - if (error != 0) - break; - if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { - error = EINVAL; - break; - } - if (i2c.len > sizeof(i2c.data)) { - error = EINVAL; - break; - } - - for (i = 0; i < i2c.len; i++) - if (ixl_read_i2c_byte(pf, i2c.offset + i, - i2c.dev_addr, &i2c.data[i])) - return (EIO); - - error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); + error = ixl_handle_i2c_eeprom_read_cmd(pf, ifr); break; } #endif default: IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command); error = ether_ioctl(ifp, command, data); break; } return (error); 
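A worked example of the MTU bound enforced in the SIOCSIFMTU case above. The 9728-byte IXL_MAX_FRAME value is an assumption taken from typical ixl headers; the ETHER_* values match net/ethernet.h, and the arithmetic is what matters.

#include <stdio.h>

#define ETHER_HDR_LEN		14	/* as in net/ethernet.h */
#define ETHER_CRC_LEN		4
#define ETHER_VLAN_ENCAP_LEN	4
#define IXL_MAX_FRAME		9728	/* assumed hardware frame limit */

int
main(void)
{
	int max_mtu = IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN;

	/* Prints 9706: the largest MTU SIOCSIFMTU will accept here. */
	printf("max mtu: %d\n", max_mtu);
	return (0);
}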
} int ixl_find_i2c_interface(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; bool i2c_en, port_matched; u32 reg; for (int i = 0; i < 4; i++) { reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) & BIT(hw->port); if (i2c_en && port_matched) return (i); } return (-1); } static char * ixl_phy_type_string(u32 bit_pos, bool ext) { static char * phy_types_str[32] = { "SGMII", "1000BASE-KX", "10GBASE-KX4", "10GBASE-KR", "40GBASE-KR4", "XAUI", "XFI", "SFI", "XLAUI", "XLPPI", "40GBASE-CR4", "10GBASE-CR1", "SFP+ Active DA", "QSFP+ Active DA", "Reserved (14)", "Reserved (15)", "Reserved (16)", "100BASE-TX", "1000BASE-T", "10GBASE-T", "10GBASE-SR", "10GBASE-LR", "10GBASE-SFP+Cu", "10GBASE-CR1", "40GBASE-CR4", "40GBASE-SR4", "40GBASE-LR4", "1000BASE-SX", "1000BASE-LX", "1000BASE-T Optical", "20GBASE-KR2", "Reserved (31)" }; static char * ext_phy_types_str[8] = { "25GBASE-KR", "25GBASE-CR", "25GBASE-SR", "25GBASE-LR", "25GBASE-AOC", "25GBASE-ACC", - "Reserved (6)", - "Reserved (7)" + "2.5GBASE-T", + "5GBASE-T" }; if (ext && bit_pos > 7) return "Invalid_Ext"; if (bit_pos > 31) return "Invalid"; return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; } +static char * +ixl_phy_type_string_ls(u8 val) +{ + if (val >= 0x1F) + return ixl_phy_type_string(val - 0x1F, true); + else + return ixl_phy_type_string(val, false); +} + int ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct i40e_aq_desc desc; enum i40e_status_code status; struct i40e_aqc_get_link_status *aq_link_status = (struct i40e_aqc_get_link_status *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (status) { device_printf(dev, "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); return (0); } -static char * -ixl_phy_type_string_ls(u8 val) -{ - if (val >= 0x1F) - return ixl_phy_type_string(val - 0x1F, true); - else - return ixl_phy_type_string(val, false); -} - static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } struct i40e_aqc_get_link_status link_status; error = ixl_aq_get_link_status(pf, &link_status); if (error) { sbuf_delete(buf); return (error); } sbuf_printf(buf, "\n" "PHY Type : 0x%02x<%s>\n" "Speed : 0x%02x\n" "Link info: 0x%02x\n" "AN info : 0x%02x\n" "Ext info : 0x%02x\n" "Loopback : 0x%02x\n" "Max Frame: %d\n" "Config : 0x%02x\n" "Power : 0x%02x", link_status.phy_type, ixl_phy_type_string_ls(link_status.phy_type), link_status.link_speed, link_status.link_info, link_status.an_info, link_status.ext_info, link_status.loopback, link_status.max_frame_size, link_status.config, link_status.power_desc); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 
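The new ixl_phy_type_string_ls() helper splits the link-status PHY type byte at 0x1F: smaller values index the base table, larger ones the extended table. A minimal sketch of the same lookup shape, with truncated placeholder tables standing in for the full driver arrays:

static const char *base_types[2] = { "SGMII", "1000BASE-KX" };	/* 0..31 in the driver */
static const char *ext_types[2] = { "25GBASE-KR", "25GBASE-CR" };	/* 0..7 in the driver */

static const char *
phy_type_ls(unsigned char val)
{
	/* Values at or above 0x1F select the extended table. */
	if (val >= 0x1F)
		return (val - 0x1F < 2 ? ext_types[val - 0x1F] : "Invalid_Ext");
	return (val < 2 ? base_types[val] : "Invalid");
}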
{ struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp abilities; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (status) { device_printf(dev, "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return (EIO); } sbuf_printf(buf, "\n" "PHY Type : %08x", abilities.phy_type); if (abilities.phy_type != 0) { sbuf_printf(buf, "<"); for (int i = 0; i < 32; i++) if ((1 << i) & abilities.phy_type) sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); - sbuf_printf(buf, ">\n"); + sbuf_printf(buf, ">"); } - sbuf_printf(buf, "PHY Ext : %02x", + sbuf_printf(buf, "\nPHY Ext : %02x", abilities.phy_type_ext); if (abilities.phy_type_ext != 0) { sbuf_printf(buf, "<"); for (int i = 0; i < 4; i++) if ((1 << i) & abilities.phy_type_ext) - sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true)); + sbuf_printf(buf, "%s,", + ixl_phy_type_string(i, true)); sbuf_printf(buf, ">"); } - sbuf_printf(buf, "\n"); - sbuf_printf(buf, - "Speed : %02x\n" + sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); + if (abilities.link_speed != 0) { + u8 link_speed; + sbuf_printf(buf, " <"); + for (int i = 0; i < 8; i++) { + link_speed = (1 << i) & abilities.link_speed; + if (link_speed) + sbuf_printf(buf, "%s, ", + ixl_link_speed_string(link_speed)); + } + sbuf_printf(buf, ">"); + } + + sbuf_printf(buf, "\n" "Abilities: %02x\n" "EEE cap : %04x\n" "EEER reg : %08x\n" "D3 Lpan : %02x\n" "ID : %02x %02x %02x %02x\n" "ModType : %02x %02x %02x\n" "ModType E: %01x\n" "FEC Cfg : %02x\n" "Ext CC : %02x", - abilities.link_speed, abilities.abilities, abilities.eee_capability, abilities.eeer_val, abilities.d3_lpan, abilities.phy_id[0], abilities.phy_id[1], abilities.phy_id[2], abilities.phy_id[3], abilities.module_type[0], abilities.module_type[1], abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, abilities.fec_cfg_curr_mod_ext_info & 0x1F, abilities.ext_comp_code); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct ixl_vsi *vsi = &pf->vsi; struct ixl_mac_filter *f; char *buf, *buf_i; int error = 0; int ftl_len = 0; int ftl_counter = 0; int buf_len = 0; int entry_len = 42; SLIST_FOREACH(f, &vsi->ftl, next) { ftl_len++; } if (ftl_len < 1) { sysctl_handle_string(oidp, "(none)", 6, req); return (0); } buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2; buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT); sprintf(buf_i++, "\n"); SLIST_FOREACH(f, &vsi->ftl, next) { sprintf(buf_i, MAC_FORMAT ", vlan %4d, flags %#06x", MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); buf_i += entry_len; /* don't print '\n' for last entry */ if (++ftl_counter != ftl_len) { sprintf(buf_i, "\n"); buf_i++; } } error = sysctl_handle_string(oidp, buf, strlen(buf), req); if (error) printf("sysctl error: %d\n", error); free(buf, M_DEVBUF); return error; } #define IXL_SW_RES_SIZE 0x14 int ixl_res_alloc_cmp(const void *a, const void *b) { const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; one = (const struct 
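The speed-reporting loop added above isolates each set bit of the 8-bit link_speed field and translates it on its own. A sketch of that walk; the per-bit names are placeholders, since the real mapping is whatever ixl_link_speed_string() returns for each isolated bit.

#include <stdio.h>

static void
print_link_speeds(unsigned char link_speed)
{
	/* Placeholder labels, not the driver's speed names. */
	static const char *names[8] = {
		"bit0", "bit1", "bit2", "bit3",
		"bit4", "bit5", "bit6", "bit7"
	};
	int i;

	for (i = 0; i < 8; i++)
		if (link_speed & (1 << i))
			printf("%s, ", names[i]);
}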
i40e_aqc_switch_resource_alloc_element_resp *)a; two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; return ((int)one->resource_type - (int)two->resource_type); } /* * Longest string length: 25 */ char * ixl_switch_res_type_string(u8 type) { static char * ixl_switch_res_type_strings[0x14] = { "VEB", "VSI", "Perfect Match MAC address", "S-tag", "(Reserved)", "Multicast hash entry", "Unicast hash entry", "VLAN", "VSI List entry", "(Reserved)", "VLAN Statistic Pool", "Mirror Rule", "Queue Set", "Inner VLAN Forward filter", "(Reserved)", "Inner MAC", "IP", "GRE/VN1 Key", "VN2 Key", "Tunneling Port" }; if (type < 0x14) return ixl_switch_res_type_strings[type]; else return "(Reserved)"; } static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; enum i40e_status_code status; int error = 0; u8 num_entries; struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(resp, sizeof(resp)); status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, resp, IXL_SW_RES_SIZE, NULL); if (status) { device_printf(dev, "%s: get_switch_resource_alloc() error %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return (error); } /* Sort entries by type for display */ qsort(resp, num_entries, sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), &ixl_res_alloc_cmp); sbuf_cat(buf, "\n"); sbuf_printf(buf, "# of entries: %d\n", num_entries); sbuf_printf(buf, " Type | Guaranteed | Total | Used | Un-allocated\n" " | (this) | (all) | (this) | (all) \n"); for (int i = 0; i < num_entries; i++) { sbuf_printf(buf, "%25s | %10d %5d %6d %12d", ixl_switch_res_type_string(resp[i].resource_type), resp[i].guaranteed, resp[i].total, resp[i].used, resp[i].total_unalloced); if (i < num_entries - 1) sbuf_cat(buf, "\n"); } error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } /* ** Caller must init and delete sbuf; this function will clear and ** finish it for caller. ** ** XXX: Cannot use the SEID for this, since there is no longer a ** fixed mapping between SEID and element type. 
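The ixl_res_alloc_cmp() comparator above widens both u8 resource_type fields to int before subtracting, which keeps the result well-defined for qsort(). A self-contained sketch of the same pattern:

#include <stdlib.h>

struct elem { unsigned char resource_type; };

static int
res_cmp(const void *a, const void *b)
{
	const struct elem *one = a;
	const struct elem *two = b;

	/* Widening to int keeps the subtraction from wrapping. */
	return ((int)one->resource_type - (int)two->resource_type);
}

/* usage: qsort(resp, num_entries, sizeof(resp[0]), res_cmp); */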
*/ char * ixl_switch_element_string(struct sbuf *s, struct i40e_aqc_switch_config_element_resp *element) { sbuf_clear(s); switch (element->element_type) { case I40E_AQ_SW_ELEM_TYPE_MAC: sbuf_printf(s, "MAC %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_PF: sbuf_printf(s, "PF %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_VF: sbuf_printf(s, "VF %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_EMP: sbuf_cat(s, "EMP"); break; case I40E_AQ_SW_ELEM_TYPE_BMC: sbuf_cat(s, "BMC"); break; case I40E_AQ_SW_ELEM_TYPE_PV: sbuf_cat(s, "PV"); break; case I40E_AQ_SW_ELEM_TYPE_VEB: sbuf_cat(s, "VEB"); break; case I40E_AQ_SW_ELEM_TYPE_PA: sbuf_cat(s, "PA"); break; case I40E_AQ_SW_ELEM_TYPE_VSI: sbuf_printf(s, "VSI %3d", element->element_info); break; default: sbuf_cat(s, "?"); break; } sbuf_finish(s); return sbuf_data(s); } static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; struct sbuf *nmbuf; enum i40e_status_code status; int error = 0; u16 next = 0; u8 aq_buf[I40E_AQ_LARGE_BUF]; struct i40e_aqc_get_switch_config_resp *sw_config; sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } status = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (status) { device_printf(dev, "%s: aq_get_switch_config() error %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return error; } if (next) device_printf(dev, "%s: TODO: get more config with SEID %d\n", __func__, next); nmbuf = sbuf_new_auto(); if (!nmbuf) { device_printf(dev, "Could not allocate sbuf for name output.\n"); sbuf_delete(buf); return (ENOMEM); } sbuf_cat(buf, "\n"); /* Assuming <= 255 elements in switch */ sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); /* Exclude: ** Revision -- all elements are revision 1 for now */ sbuf_printf(buf, "SEID ( Name ) | Uplink | Downlink | Conn Type\n" " | | | (uplink)\n"); for (int i = 0; i < sw_config->header.num_reported; i++) { // "%4d (%8s) | %8s %8s %#8x", sbuf_printf(buf, "%4d", sw_config->element[i].seid); sbuf_cat(buf, " "); sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, &sw_config->element[i])); sbuf_cat(buf, " | "); sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type); if (i < sw_config->header.num_reported - 1) sbuf_cat(buf, "\n"); } sbuf_delete(nmbuf); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; u32 reg; struct i40e_aqc_get_set_rss_key_data key_data; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key)); sbuf_cat(buf, "\n"); if (hw->mac.type == I40E_MAC_X722) { status = 
i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); if (status) device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); } } ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static void ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) { int i, j, k, width; char c; if (length < 1 || buf == NULL) return; int byte_stride = 16; int lines = length / byte_stride; int rem = length % byte_stride; if (rem > 0) lines++; for (i = 0; i < lines; i++) { width = (rem > 0 && i == lines - 1) ? rem : byte_stride; sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); for (j = 0; j < width; j++) sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); if (width < byte_stride) { for (k = 0; k < (byte_stride - width); k++) sbuf_printf(sb, " "); } if (!text) { sbuf_printf(sb, "\n"); continue; } for (j = 0; j < width; j++) { c = (char)buf[i * byte_stride + j]; if (c < 32 || c > 126) sbuf_printf(sb, "."); else sbuf_printf(sb, "%c", c); if (j == width - 1) sbuf_printf(sb, "\n"); } } } static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; u8 hlut[512]; u32 reg; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(hlut, sizeof(hlut)); sbuf_cat(buf, "\n"); if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); if (status) device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { reg = rd32(hw, I40E_PFQF_HLUT(i)); bcopy(®, &hlut[i << 2], 4); } } ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; u64 hena; hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); return sysctl_handle_long(oidp, NULL, hena, req); } /* * Sysctl to disable firmware's link management * * 1 - Disable link management on this port * 0 - Re-enable link management * * On normal NVMs, firmware manages link by default. 
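A standalone sketch of the 16-byte-stride hex-plus-ASCII dump implemented by ixl_sbuf_print_bytes() above, using stdio instead of sbuf so it runs in userspace:

#include <stdio.h>

static void
hexdump(const unsigned char *buf, int len)
{
	int i, j;

	for (i = 0; i < len; i += 16) {
		int width = (len - i < 16) ? len - i : 16;

		printf("%4d | ", i);
		for (j = 0; j < width; j++)
			printf("%02x ", buf[i + j]);
		for (j = width; j < 16; j++)
			printf("   ");		/* pad a short final row */
		for (j = 0; j < width; j++) {
			unsigned char c = buf[i + j];

			putchar((c < 32 || c > 126) ? '.' : c);
		}
		putchar('\n');
	}
}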
*/ static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_mode = -1; enum i40e_status_code status = 0; int error = 0; /* Read in new mode */ error = sysctl_handle_int(oidp, &requested_mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Check for sane value */ if (requested_mode < 0 || requested_mode > 1) { device_printf(dev, "Valid modes are 0 or 1\n"); return (EINVAL); } /* Set new mode */ status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); if (status) { device_printf(dev, "%s: Error setting new phy debug mode %s," " aq error: %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } return (0); } /* + * Read some diagnostic data from a (Q)SFP+ module + * + * SFP A2 QSFP Lower Page + * Temperature 96-97 22-23 + * Vcc 98-99 26-27 + * TX power 102-103 34-35..40-41 + * RX power 104-105 50-51..56-57 + */ +static int +ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + device_t dev = pf->dev; + struct sbuf *sbuf; + int error = 0; + u8 output; + + if (req->oldptr == NULL) { + error = SYSCTL_OUT(req, 0, 128); + return (0); + } + + error = pf->read_i2c_byte(pf, 0, 0xA0, &output); + if (error) { + device_printf(dev, "Error reading from i2c\n"); + return (error); + } + + /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ + if (output == 0x3) { + /* + * Check for: + * - Internally calibrated data + * - Diagnostic monitoring is implemented + */ + pf->read_i2c_byte(pf, 92, 0xA0, &output); + if (!(output & 0x60)) { + device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); + return (0); + } + + sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); + + for (u8 offset = 96; offset < 100; offset++) { + pf->read_i2c_byte(pf, offset, 0xA2, &output); + sbuf_printf(sbuf, "%02X ", output); + } + for (u8 offset = 102; offset < 106; offset++) { + pf->read_i2c_byte(pf, offset, 0xA2, &output); + sbuf_printf(sbuf, "%02X ", output); + } + } else if (output == 0xD || output == 0x11) { + /* + * QSFP+ modules are always internally calibrated, and must indicate + * what types of diagnostic monitoring are implemented + */ + sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); + + for (u8 offset = 22; offset < 24; offset++) { + pf->read_i2c_byte(pf, offset, 0xA0, &output); + sbuf_printf(sbuf, "%02X ", output); + } + for (u8 offset = 26; offset < 28; offset++) { + pf->read_i2c_byte(pf, offset, 0xA0, &output); + sbuf_printf(sbuf, "%02X ", output); + } + /* Read the data from the first lane */ + for (u8 offset = 34; offset < 36; offset++) { + pf->read_i2c_byte(pf, offset, 0xA0, &output); + sbuf_printf(sbuf, "%02X ", output); + } + for (u8 offset = 50; offset < 52; offset++) { + pf->read_i2c_byte(pf, offset, 0xA0, &output); + sbuf_printf(sbuf, "%02X ", output); + } + } else { + device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); + return (0); + } + + sbuf_finish(sbuf); + sbuf_delete(sbuf); + + return (0); +} + +/* * Sysctl to read a byte from I2C bus. 
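The diag-data sysctl above dumps raw SFF bytes; turning them into physical units is left to the reader. A hedged sketch of the SFF-8472 conversions for the first two word pairs (temperature is a signed value in 1/256 degC, Vcc an unsigned value in 100 uV steps); verify the scale factors against the spec before relying on them.

#include <stdint.h>
#include <stdio.h>

static void
decode_sfp_diag(uint8_t t_msb, uint8_t t_lsb, uint8_t v_msb, uint8_t v_lsb)
{
	int16_t raw_t = (int16_t)((t_msb << 8) | t_lsb);	/* bytes 96-97 */
	uint16_t raw_v = (uint16_t)((v_msb << 8) | v_lsb);	/* bytes 98-99 */

	printf("temperature: %.2f C\n", raw_t / 256.0);
	printf("vcc: %.4f V\n", raw_v * 100e-6);
}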
* * Input: 32-bit value: * bits 0-7: device address (0xA0 or 0xA2) * bits 8-15: offset (0-255) * bits 16-31: unused * Output: 8-bit value read */ static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int input = -1, error = 0; - device_printf(dev, "%s: start\n", __func__); - u8 dev_addr, offset, output; /* Read in I2C read parameters */ error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Validate device address */ dev_addr = input & 0xFF; if (dev_addr != 0xA0 && dev_addr != 0xA2) { return (EINVAL); } offset = (input >> 8) & 0xFF; - error = ixl_read_i2c_byte(pf, offset, dev_addr, &output); + error = pf->read_i2c_byte(pf, offset, dev_addr, &output); if (error) return (error); device_printf(dev, "%02X\n", output); return (0); } /* * Sysctl to write a byte to the I2C bus. * * Input: 32-bit value: * bits 0-7: device address (0xA0 or 0xA2) * bits 8-15: offset (0-255) * bits 16-23: value to write * bits 24-31: unused * Output: 8-bit value written */ static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int input = -1, error = 0; u8 dev_addr, offset, value; /* Read in I2C write parameters */ error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Validate device address */ dev_addr = input & 0xFF; if (dev_addr != 0xA0 && dev_addr != 0xA2) { return (EINVAL); } offset = (input >> 8) & 0xFF; value = (input >> 16) & 0xFF; - error = ixl_write_i2c_byte(pf, offset, dev_addr, value); + error = pf->write_i2c_byte(pf, offset, dev_addr, value); if (error) return (error); device_printf(dev, "%02X written\n", value); return (0); } static int ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, u8 bit_pos, int *is_set) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; enum i40e_status_code status; status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, abilities, NULL); if (status) { device_printf(dev, "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); return (0); } static int ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, u8 bit_pos, int set) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct i40e_aq_set_phy_config config; enum i40e_status_code status; /* Set new PHY config */ memset(&config, 0, sizeof(config)); config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); if (set) config.fec_config |= bit_pos; if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.phy_type = abilities->phy_type; config.phy_type_ext = abilities->phy_type_ext; config.link_speed = abilities->link_speed; config.eee_capability = abilities->eee_capability; config.eeer = abilities->eeer_val; config.low_power_ctrl = abilities->d3_lpan; status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { device_printf(dev, "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } } return (0); } static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; 
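A sketch of packing the 32-bit argument these read/write I2C byte sysctls consume, following the bit layout documented in the comments above; the resulting value is what would be written to the corresponding debug sysctl.

#include <stdint.h>

static uint32_t
pack_i2c_write(uint8_t dev_addr, uint8_t offset, uint8_t value)
{
	/* bits 0-7: device address (0xA0 or 0xA2), bits 8-15: offset,
	 * bits 16-23: value to write */
	return ((uint32_t)value << 16 | (uint32_t)offset << 8 | dev_addr);
}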
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); } static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); } static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); } static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); } static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); } static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } u8 *final_buff; /* This amount is only necessary if reading the entire cluster into memory */ #define IXL_FINAL_BUFF_SIZE (1280 * 1024) final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK); - if (final_buff == NULL) { - device_printf(dev, "Could not allocate memory for output.\n"); - goto out; - } int final_buff_len = 0; u8 cluster_id = 1; bool more = true; u8 dump_buf[4096]; u16 curr_buff_size = 4096; u8 curr_next_table = 0; u32 curr_next_index = 0; u16 ret_buff_size; u8 ret_next_table; u32 ret_next_index; sbuf_cat(buf, "\n"); while (more) { status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size, dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL); if (status) { device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); goto free_out; } /* copy info out of temp buffer */ bcopy(dump_buf, (caddr_t)final_buff + 
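All four FEC sysctls above funnel into the same read-modify-write done by ixl_set_fec_config(): clear the targeted bit, optionally set it, and only touch the PHY when the value actually changes. A minimal sketch of that update step:

static unsigned char
fec_rmw(unsigned char curr, unsigned char bit_pos, int set)
{
	unsigned char next = curr & ~bit_pos;	/* bit_pos is a single-bit mask */

	if (set)
		next |= bit_pos;
	return (next);		/* caller only reconfigures if next != curr */
}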
final_buff_len, ret_buff_size); final_buff_len += ret_buff_size; if (ret_next_table != curr_next_table) { /* We're done with the current table; we can dump out read data. */ sbuf_printf(buf, "%d:", curr_next_table); int bytes_printed = 0; while (bytes_printed <= final_buff_len) { sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), ""); bytes_printed += 16; } sbuf_cat(buf, "\n"); /* The entire cluster has been read; we're finished */ if (ret_next_table == 0xFF) break; /* Otherwise clear the output buffer and continue reading */ bzero(final_buff, IXL_FINAL_BUFF_SIZE); final_buff_len = 0; } if (ret_next_index == 0xFFFFFFFF) ret_next_index = 0; bzero(dump_buf, sizeof(dump_buf)); curr_next_table = ret_next_table; curr_next_index = ret_next_index; } free_out: free(final_buff, M_DEVBUF); -out: error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int -ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) +ixl_start_fw_lldp(struct ixl_pf *pf) { - struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; + enum i40e_status_code status; + + status = i40e_aq_start_lldp(hw, false, NULL); + if (status != I40E_SUCCESS) { + switch (hw->aq.asq_last_status) { + case I40E_AQ_RC_EEXIST: + device_printf(pf->dev, + "FW LLDP agent is already running\n"); + break; + case I40E_AQ_RC_EPERM: + device_printf(pf->dev, + "Device configuration forbids SW from starting " + "the LLDP agent. Set the \"LLDP Agent\" UEFI HII " + "attribute to \"Enabled\" to use this sysctl\n"); + return (EINVAL); + default: + device_printf(pf->dev, + "Starting FW LLDP agent failed: error: %s, %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return (EINVAL); + } + } + + atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); + return (0); +} + +static int +ixl_stop_fw_lldp(struct ixl_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; - int error = 0; - int state, new_state; enum i40e_status_code status; + + if (hw->func_caps.npar_enable != 0) { + device_printf(dev, + "Disabling FW LLDP agent is not supported on this device\n"); + return (EINVAL); + } + + if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { + device_printf(dev, + "Disabling FW LLDP agent is not supported in this FW version. 
Please update FW to enable this feature.\n"); + return (EINVAL); + } + + status = i40e_aq_stop_lldp(hw, true, false, NULL); + if (status != I40E_SUCCESS) { + if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { + device_printf(dev, + "Disabling FW LLDP agent failed: error: %s, %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return (EINVAL); + } + + device_printf(dev, "FW LLDP agent is already stopped\n"); + } + + i40e_aq_set_dcb_parameters(hw, true, NULL); + atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); + return (0); +} + +static int +ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + int state, new_state, error = 0; + state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); /* Read in new mode */ error = sysctl_handle_int(oidp, &new_state, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Already in requested state */ if (new_state == state) return (error); - if (new_state == 0) { - if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) { - device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n"); - return (EINVAL); - } + if (new_state == 0) + return ixl_stop_fw_lldp(pf); - if (pf->hw.aq.api_maj_ver < 1 || - (pf->hw.aq.api_maj_ver == 1 && - pf->hw.aq.api_min_ver < 7)) { - device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); - return (EINVAL); - } - - i40e_aq_stop_lldp(&pf->hw, true, NULL); - i40e_aq_set_dcb_parameters(&pf->hw, true, NULL); - atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); - } else { - status = i40e_aq_start_lldp(&pf->hw, NULL); - if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST) - device_printf(dev, "FW LLDP agent is already running\n"); - atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); - } - - return (0); + return ixl_start_fw_lldp(pf); } -/* - * Get FW LLDP Agent status - */ -int -ixl_get_fw_lldp_status(struct ixl_pf *pf) +static int +ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) { - enum i40e_status_code ret = I40E_SUCCESS; - struct i40e_lldp_variables lldp_cfg; - struct i40e_hw *hw = &pf->hw; - u8 adminstatus = 0; + struct ixl_pf *pf = (struct ixl_pf *)arg1; + int state, new_state; + int sysctl_handle_status = 0; + enum i40e_status_code cmd_status; - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); - if (ret) - return ret; + /* Init states' values */ + state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED)); - /* Get the LLDP AdminStatus for the current port */ - adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); - adminstatus &= 0xf; + /* Get requested mode */ + sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); + if ((sysctl_handle_status) || (req->newptr == NULL)) + return (sysctl_handle_status); - /* Check if LLDP agent is disabled */ - if (!adminstatus) { - device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n"); - atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); - } else - atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); + /* Check if state has changed */ + if (new_state == state) + return (0); + + /* Set new state */ + cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); + + /* Save new state or report error */ + if (!cmd_status) { + if (new_state == 0) + atomic_clear_int(&pf->state, IXL_PF_STATE_EEE_ENABLED); + else + atomic_set_int(&pf->state, IXL_PF_STATE_EEE_ENABLED); + } else if (cmd_status == I40E_ERR_CONFIG) + return (EPERM); + 
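The refactored FW LLDP sysctl and the new EEE sysctl above share one toggle shape: read the current state flag, fetch the requested value, return early when nothing changes, then dispatch to an enable or disable routine. A hedged kernel-context sketch of that shape; SOME_FLAG, example_enable() and example_disable() are placeholders, not driver symbols.

static int
example_toggle_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int state, new_state, error;

	state = new_state = ((pf->state & SOME_FLAG) != 0);
	error = sysctl_handle_int(oidp, &new_state, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_state == state)
		return (0);	/* already in requested state */
	return (new_state ? example_enable(pf) : example_disable(pf));
}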
else + return (EIO); return (0); } int ixl_attach_get_link_status(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int error = 0; if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) { i40e_msec_delay(75); error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); if (error) { device_printf(dev, "link restart failed, aq_err=%d\n", pf->hw.aq.asq_last_status); return error; } } /* Determine link state */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); return (0); } Index: releng/11.3/sys/dev/ixl/ixl_pf_qmgr.c =================================================================== --- releng/11.3/sys/dev/ixl/ixl_pf_qmgr.c (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_pf_qmgr.c (revision 349181) @@ -1,308 +1,308 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf_qmgr.h" static int ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num); int ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues) { if (num_queues < 1) return (EINVAL); qmgr->num_queues = num_queues; qmgr->qinfo = malloc(num_queues * sizeof(struct ixl_pf_qmgr_qinfo), M_IXL, M_ZERO | M_WAITOK); if (qmgr->qinfo == NULL) return ENOMEM; return (0); } int ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag) { int i; int avail; int block_start; u16 alloc_size; if (qtag == NULL || num < 1) return (EINVAL); /* We have to allocate in power-of-two chunks, so get next power of two */ alloc_size = (u16)next_power_of_two(num); /* Don't try if there aren't enough queues */ avail = ixl_pf_qmgr_get_num_free(qmgr); if (avail < alloc_size) return (ENOSPC); block_start = ixl_pf_qmgr_find_free_contiguous_block(qmgr, alloc_size); if (block_start < 0) return (ENOSPC); /* Mark queues as allocated */ for (i = block_start; i < block_start + alloc_size; i++) qmgr->qinfo[i].allocated = true; bzero(qtag, sizeof(*qtag)); qtag->qmgr = qmgr; qtag->type = IXL_PF_QALLOC_CONTIGUOUS; qtag->qidx[0] = block_start; - qtag->num_allocated = num; - qtag->num_active = alloc_size; + qtag->num_allocated = alloc_size; + qtag->num_active = num; return (0); } /* * NB: indices is u16 because this is the queue index width used in the Add VSI AQ command */ int ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag) { int i; int avail, count = 0; u16 alloc_size; if (qtag == NULL || num < 1 || num > 16) return (EINVAL); /* We have to allocate in power-of-two chunks, so get next power of two */ alloc_size = (u16)next_power_of_two(num); avail = ixl_pf_qmgr_get_num_free(qmgr); if (avail < alloc_size) return (ENOSPC); bzero(qtag, sizeof(*qtag)); qtag->qmgr = qmgr; qtag->type = IXL_PF_QALLOC_SCATTERED; qtag->num_active = num; qtag->num_allocated = alloc_size; for (i = 0; i < qmgr->num_queues; i++) { if (!qmgr->qinfo[i].allocated) { qtag->qidx[count] = i; count++; qmgr->qinfo[i].allocated = true; if (count == alloc_size) return (0); } } // Shouldn't get here return (EDOOFUS); } int ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag) { u16 i, qidx; if (qtag == NULL) return (EINVAL); if (qtag->type == IXL_PF_QALLOC_SCATTERED) { for (i = 0; i < qtag->num_allocated; i++) { qidx = qtag->qidx[i]; bzero(&qmgr->qinfo[qidx], sizeof(qmgr->qinfo[qidx])); } } else { u16 first_index = qtag->qidx[0]; for (i = first_index; i < first_index + qtag->num_allocated; i++) bzero(&qmgr->qinfo[i], sizeof(qmgr->qinfo[qidx])); } qtag->qmgr = NULL; return (0); } int ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr) { return (qmgr->num_queues); } /* * ERJ: This assumes the info array isn't longer than INT_MAX. * This assumption might cause a y3k bug or something, I'm sure. 
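Both qmgr allocators round the request up with next_power_of_two(); the fix above makes num_allocated record that rounded size while num_active keeps the caller's count. A sketch of the usual bit-smearing rounding for 16-bit counts, assuming num >= 1 as the allocators already enforce:

#include <stdint.h>

static uint16_t
next_pow2_u16(uint16_t n)
{
	n--;			/* so exact powers of two map to themselves */
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	return (n + 1);
}

/* e.g. a request for 5 queues allocates 8 (num_allocated) but marks
 * only 5 active (num_active), matching the corrected fields above. */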
*/ int ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr) { int count = 0; for (int i = 0; i < qmgr->num_queues; i++) { if (!qmgr->qinfo[i].allocated) count++; } return (count); } int ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start) { int i; if (start > qmgr->num_queues - 1) return (-EINVAL); for (i = start; i < qmgr->num_queues; i++) { if (qmgr->qinfo[i].allocated) continue; else return (i); } // No free queues return (-ENOSPC); } void ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr) { free(qmgr->qinfo, M_IXL); qmgr->qinfo = NULL; } void ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) qmgr->qinfo[pf_qidx].tx_enabled = true; else qmgr->qinfo[pf_qidx].rx_enabled = true; } void ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) qmgr->qinfo[pf_qidx].tx_enabled = false; else qmgr->qinfo[pf_qidx].rx_enabled = false; } void ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) qmgr->qinfo[pf_qidx].tx_configured = true; else qmgr->qinfo[pf_qidx].rx_configured = true; } bool ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) return (qmgr->qinfo[pf_qidx].tx_enabled); else return (qmgr->qinfo[pf_qidx].rx_enabled); } bool ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) return (qmgr->qinfo[pf_qidx].tx_configured); else return (qmgr->qinfo[pf_qidx].rx_configured); } u16 ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index) { MPASS(index < qtag->num_allocated); if (qtag->type == IXL_PF_QALLOC_CONTIGUOUS) return qtag->qidx[0] + index; else return qtag->qidx[index]; } /* Static Functions */ static int ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num) { int i; int count = 0; bool block_started = false; int possible_start; for (i = 0; i < qmgr->num_queues; i++) { if (!qmgr->qinfo[i].allocated) { if (!block_started) { block_started = true; possible_start = i; } count++; if (count == num) return (possible_start); } else { /* this queue is already allocated */ block_started = false; count = 0; } } /* Can't find a contiguous block of the requested size */ return (-1); } Index: releng/11.3/sys/dev/ixl/ixl_pf_qmgr.h =================================================================== --- releng/11.3/sys/dev/ixl/ixl_pf_qmgr.h (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_pf_qmgr.h (revision 349181) @@ -1,109 +1,109 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf.h" #ifndef _IXL_PF_QMGR_H_ #define _IXL_PF_QMGR_H_ /* * Primarily manages the queues that need to be allocated to VSIs. * * Cardinality: There should only be one of these in a PF. * Lifetime: Created and initialized in attach(); destroyed in detach(). */ #define IXL_MAX_SCATTERED_QUEUES 16 #define IXL_MAX_CONTIGUOUS_QUEUES_XL710 64 #define IXL_MAX_CONTIGUOUS_QUEUES_X722 128 /* Structures */ /* Manager */ struct ixl_pf_qmgr_qinfo { bool allocated; bool tx_enabled; bool rx_enabled; bool tx_configured; bool rx_configured; }; struct ixl_pf_qmgr { u16 num_queues; struct ixl_pf_qmgr_qinfo *qinfo; }; /* Tag */ enum ixl_pf_qmgr_qalloc_type { IXL_PF_QALLOC_CONTIGUOUS, IXL_PF_QALLOC_SCATTERED }; struct ixl_pf_qtag { struct ixl_pf_qmgr *qmgr; enum ixl_pf_qmgr_qalloc_type type; u16 qidx[IXL_MAX_SCATTERED_QUEUES]; u16 num_allocated; u16 num_active; }; /* Public manager functions */ int ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues); void ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr); int ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr); int ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start); int ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr); /* Allocate queues for a VF VSI */ int ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag); /* Allocate queues for the LAN VSIs, or X722 VF VSIs */ int ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag); /* Release a queue allocation */ int ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag); /* Help manage queues used in VFs */ /* Typically hardware refers to RX as 0 and TX as 1, so continue that convention here */ void ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx); void ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx); void ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx); bool ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx); bool ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx); /* Public tag functions */ u16 ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index); #endif /* _IXL_PF_QMGR_H_ */ Index: 
releng/11.3/sys/dev/ixl/ixl_txrx.c =================================================================== --- releng/11.3/sys/dev/ixl/ixl_txrx.c (revision 349180) +++ releng/11.3/sys/dev/ixl/ixl_txrx.c (revision 349181) @@ -1,2149 +1,2342 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ /* ** IXL driver TX/RX Routines: ** This was seperated to allow usage by ** both the PF and VF drivers. 
*/ #ifndef IXL_STANDALONE_BUILD #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #endif #include "ixl.h" #ifdef RSS #include #endif /* Local Prototypes */ static void ixl_rx_checksum(struct mbuf *, u32, u32, u8); static void ixl_refresh_mbufs(struct ixl_queue *, int); static int ixl_xmit(struct ixl_queue *, struct mbuf **); static int ixl_tx_setup_offload(struct ixl_queue *, struct mbuf *, u32 *, u32 *); static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *); static void ixl_queue_sw_irq(struct ixl_vsi *, int); static inline void ixl_rx_discard(struct rx_ring *, int); static inline void ixl_rx_input(struct rx_ring *, struct ifnet *, struct mbuf *, u8); static inline bool ixl_tso_detect_sparse(struct mbuf *mp); static inline u32 ixl_get_tx_head(struct ixl_queue *que); #ifdef DEV_NETMAP -#include +#include +#include +#include #endif /* DEV_NETMAP */ +#ifdef IXL_DEBUG +static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); +#endif + /* * @key key is saved into this parameter */ void ixl_get_default_rss_key(u32 *key) { MPASS(key != NULL); u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687, 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1, 0x0, 0x0, 0x0}; bcopy(rss_seed, key, IXL_RSS_KEY_SIZE); } /** * i40e_vc_stat_str - convert virtchnl status err code to a string * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ const char * i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err) { switch (stat_err) { case VIRTCHNL_STATUS_SUCCESS: return "OK"; case VIRTCHNL_ERR_PARAM: return "VIRTCHNL_ERR_PARAM"; + case VIRTCHNL_STATUS_ERR_NO_MEMORY: + return "VIRTCHNL_STATUS_ERR_NO_MEMORY"; case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH"; case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR"; case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID"; - case VIRTCHNL_STATUS_NOT_SUPPORTED: - return "VIRTCHNL_STATUS_NOT_SUPPORTED"; + case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: + return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR"; + case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: + return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); return hw->err_str; } /* * PCI BUSMASTER needs to be set for proper operation. */ void ixl_set_busmaster(device_t dev) { u16 pci_cmd_word; pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); pci_cmd_word |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); } /* * Rewrite the ENABLE bit in the MSIX control register */ void ixl_set_msix_enable(device_t dev) { int msix_ctrl, rid; pci_find_cap(dev, PCIY_MSIX, &rid); rid += PCIR_MSIX_CTRL; msix_ctrl = pci_read_config(dev, rid, 2); msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(dev, rid, msix_ctrl, 2); } /* ** Multiqueue Transmit driver */ int ixl_mq_start(struct ifnet *ifp, struct mbuf *m) { struct ixl_vsi *vsi = ifp->if_softc; struct ixl_queue *que; struct tx_ring *txr; int err, i; #ifdef RSS u32 bucket_id; #endif /* * Which queue to use: * * When doing RSS, map it to the same outbound * queue as the incoming flow would be mapped to. * If everything is setup correctly, it should be * the same bucket that the current CPU we're on is. 
*/ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { #ifdef RSS if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), &bucket_id) == 0) { i = bucket_id % vsi->num_queues; } else #endif i = m->m_pkthdr.flowid % vsi->num_queues; } else i = curcpu % vsi->num_queues; que = &vsi->queues[i]; txr = &que->txr; err = drbr_enqueue(ifp, txr->br, m); if (err) return (err); if (IXL_TX_TRYLOCK(txr)) { ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); } else taskqueue_enqueue(que->tq, &que->tx_task); return (0); } int ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) { struct ixl_queue *que = txr->que; struct ixl_vsi *vsi = que->vsi; struct mbuf *next; int err = 0; if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || vsi->link_active == 0) return (ENETDOWN); /* Process the transmit queue */ while ((next = drbr_peek(ifp, txr->br)) != NULL) { if ((err = ixl_xmit(que, &next)) != 0) { if (next == NULL) drbr_advance(ifp, txr->br); else drbr_putback(ifp, txr->br, next); break; } drbr_advance(ifp, txr->br); /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } if (txr->avail < IXL_TX_CLEANUP_THRESHOLD) ixl_txeof(que); return (err); } /* * Called from a taskqueue to drain queued transmit packets. */ void ixl_deferred_mq_start(void *arg, int pending) { struct ixl_queue *que = arg; struct tx_ring *txr = &que->txr; struct ixl_vsi *vsi = que->vsi; struct ifnet *ifp = vsi->ifp; IXL_TX_LOCK(txr); if (!drbr_empty(ifp, txr->br)) ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); } /* ** Flush all queue ring buffers */ void ixl_qflush(struct ifnet *ifp) { struct ixl_vsi *vsi = ifp->if_softc; for (int i = 0; i < vsi->num_queues; i++) { struct ixl_queue *que = &vsi->queues[i]; struct tx_ring *txr = &que->txr; struct mbuf *m; IXL_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); IXL_TX_UNLOCK(txr); } if_qflush(ifp); } static inline bool ixl_tso_detect_sparse(struct mbuf *mp) { struct mbuf *m; int num, mss; num = 0; mss = mp->m_pkthdr.tso_segsz; /* Exclude first mbuf; assume it contains all headers */ for (m = mp->m_next; m != NULL; m = m->m_next) { if (m == NULL) break; num++; mss -= m->m_len % mp->m_pkthdr.tso_segsz; if (num > IXL_SPARSE_CHAIN) return (true); if (mss < 1) { num = (mss == 0) ? 0 : 1; mss += mp->m_pkthdr.tso_segsz; } } return (false); } /********************************************************************* * * This routine maps the mbufs to tx descriptors, allowing the * TX engine to transmit the packets. 
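*
* In outline (a sketch of the code below, not extra behavior):
* sparse TSO chains are first compacted with m_defrag(); the
* chain is then loaded with bus_dmamap_load_mbuf_sg(), retrying
* once after another m_defrag() if the load fails with EFBIG;
* one data descriptor is written per DMA segment; and the final
* descriptor gets RS|EOP set before the tail register is bumped.
*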
* - return 0 on success, positive on failure * **********************************************************************/ #define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) static int ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ixl_tx_buf *buf; struct i40e_tx_desc *txd = NULL; struct mbuf *m_head, *m; int i, j, error, nsegs; int first, last = 0; u16 vtag = 0; u32 cmd, off; bus_dmamap_t map; bus_dma_tag_t tag; bus_dma_segment_t segs[IXL_MAX_TSO_SEGS]; cmd = off = 0; m_head = *m_headp; /* * Important to capture the first descriptor * used because it will contain the index of * the one we tell the hardware to report back */ first = txr->next_avail; buf = &txr->buffers[first]; map = buf->map; tag = txr->tx_tag; if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { /* Use larger mapping for TSO */ tag = txr->tso_tag; if (ixl_tso_detect_sparse(m_head)) { m = m_defrag(m_head, M_NOWAIT); if (m == NULL) { m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; } } /* * Map the packet for DMA. */ error = bus_dmamap_load_mbuf_sg(tag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { struct mbuf *m; m = m_defrag(*m_headp, M_NOWAIT); if (m == NULL) { que->mbuf_defrag_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; /* Try it again */ error = bus_dmamap_load_mbuf_sg(tag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { que->tx_dmamap_failed++; m_freem(*m_headp); *m_headp = NULL; return (error); } } else if (error != 0) { que->tx_dmamap_failed++; m_freem(*m_headp); *m_headp = NULL; return (error); } /* Make certain there are enough descriptors */ if (nsegs > txr->avail - 2) { txr->no_desc++; error = ENOBUFS; goto xmit_fail; } m_head = *m_headp; /* Set up the TSO/CSUM offload */ if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { error = ixl_tx_setup_offload(que, m_head, &cmd, &off); if (error) goto xmit_fail; } cmd |= I40E_TX_DESC_CMD_ICRC; /* Grab the VLAN tag */ if (m_head->m_flags & M_VLANTAG) { cmd |= I40E_TX_DESC_CMD_IL2TAG1; vtag = htole16(m_head->m_pkthdr.ether_vtag); } i = txr->next_avail; for (j = 0; j < nsegs; j++) { bus_size_t seglen; buf = &txr->buffers[i]; buf->tag = tag; /* Keep track of the type tag */ txd = &txr->base[i]; seglen = segs[j].ds_len; txd->buffer_addr = htole64(segs[j].ds_addr); txd->cmd_type_offset_bsz = htole64(I40E_TX_DESC_DTYPE_DATA | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT) | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT) | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT)); last = i; /* descriptor that will get completion IRQ */ if (++i == que->num_tx_desc) i = 0; buf->m_head = NULL; buf->eop_index = -1; } /* Set the last descriptor for report */ txd->cmd_type_offset_bsz |= htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT)); txr->avail -= nsegs; txr->next_avail = i; buf->m_head = m_head; /* Swap the dma map between the first and last descriptor. * The descriptor that gets checked on completion will now * have the real map from the first descriptor. 
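*
* Schematically (illustration only):
*
*   buffers[first].map  <-  buffers[last].map   (an idle spare map)
*   buffers[last].map   <-  map                 (the loaded map)
*
* so the map that actually holds this packet's DMA state travels
* to the EOP buffer, where m_head is also stored, and everything
* can be unloaded and freed in one place at completion time.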
*/ txr->buffers[first].map = buf->map; buf->map = map; bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE); /* Set the index of the descriptor that will be marked done */ buf = &txr->buffers[first]; buf->eop_index = last; bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Advance the Transmit Descriptor Tail (Tdt), this tells the * hardware that this frame is available to transmit. */ ++txr->total_packets; wr32(hw, txr->tail, i); /* Mark outstanding work */ atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG); return (0); xmit_fail: bus_dmamap_unload(tag, buf->map); return (error); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset. * **********************************************************************/ int ixl_allocate_tx_data(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; struct ixl_vsi *vsi = que->vsi; device_t dev = vsi->dev; struct ixl_tx_buf *buf; int i, error = 0; /* * Setup DMA descriptor areas. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IXL_TSO_SIZE, /* maxsize */ IXL_MAX_TX_SEGS, /* nsegments */ IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->tx_tag))) { device_printf(dev,"Unable to allocate TX DMA tag\n"); return (error); } /* Make a special tag for TSO */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IXL_TSO_SIZE, /* maxsize */ IXL_MAX_TSO_SEGS, /* nsegments */ IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->tso_tag))) { device_printf(dev,"Unable to allocate TX TSO DMA tag\n"); goto free_tx_dma; } if (!(txr->buffers = (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) * que->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto free_tx_tso_dma; } /* Create the descriptor buffer default dma maps */ buf = txr->buffers; for (i = 0; i < que->num_tx_desc; i++, buf++) { buf->tag = txr->tx_tag; error = bus_dmamap_create(buf->tag, 0, &buf->map); if (error != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto free_buffers; } } return 0; free_buffers: while (i--) { buf--; bus_dmamap_destroy(buf->tag, buf->map); } free(txr->buffers, M_DEVBUF); txr->buffers = NULL; free_tx_tso_dma: bus_dma_tag_destroy(txr->tso_tag); txr->tso_tag = NULL; free_tx_dma: bus_dma_tag_destroy(txr->tx_tag); txr->tx_tag = NULL; return (error); } /********************************************************************* * * (Re)Initialize a queue transmit ring. 
* - called by init, it clears the descriptor ring, * and frees any stale mbufs * **********************************************************************/ void ixl_init_tx_ring(struct ixl_queue *que) { #ifdef DEV_NETMAP struct netmap_adapter *na = NA(que->vsi->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ struct tx_ring *txr = &que->txr; struct ixl_tx_buf *buf; /* Clear the old ring contents */ IXL_TX_LOCK(txr); #ifdef DEV_NETMAP /* * (under lock): if in netmap mode, do some consistency * checks and set slot to entry 0 of the netmap ring. */ slot = netmap_reset(na, NR_TX, que->me, 0); #endif /* DEV_NETMAP */ bzero((void *)txr->base, (sizeof(struct i40e_tx_desc)) * que->num_tx_desc); /* Reset indices */ txr->next_avail = 0; txr->next_to_clean = 0; /* Reset watchdog status */ txr->watchdog_timer = 0; /* Free any existing tx mbufs. */ buf = txr->buffers; for (int i = 0; i < que->num_tx_desc; i++, buf++) { if (buf->m_head != NULL) { bus_dmamap_sync(buf->tag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(buf->tag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } #ifdef DEV_NETMAP /* * In netmap mode, set the map for the packet buffer. * NOTE: Some drivers (not this one) also need to set * the physical buffer address in the NIC ring. * netmap_idx_n2k() maps a nic index, i, into the corresponding * netmap slot index, si */ if (slot) { int si = netmap_idx_n2k(na->tx_rings[que->me], i); netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si)); } #endif /* DEV_NETMAP */ /* Clear the EOP index */ buf->eop_index = -1; } /* Set number of descriptors available */ txr->avail = que->num_tx_desc; bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); IXL_TX_UNLOCK(txr); } /********************************************************************* * * Free transmit ring related data structures. * **********************************************************************/ void ixl_free_que_tx(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; struct ixl_tx_buf *buf; INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); for (int i = 0; i < que->num_tx_desc; i++) { buf = &txr->buffers[i]; if (buf->m_head != NULL) { bus_dmamap_sync(buf->tag, buf->map, BUS_DMASYNC_POSTWRITE); m_freem(buf->m_head); buf->m_head = NULL; } bus_dmamap_unload(buf->tag, buf->map); bus_dmamap_destroy(buf->tag, buf->map); } if (txr->buffers != NULL) { free(txr->buffers, M_DEVBUF); txr->buffers = NULL; } if (txr->tx_tag != NULL) { bus_dma_tag_destroy(txr->tx_tag); txr->tx_tag = NULL; } if (txr->tso_tag != NULL) { bus_dma_tag_destroy(txr->tso_tag); txr->tso_tag = NULL; } INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me); return; } /********************************************************************* * * Setup descriptor for hw offloads * **********************************************************************/ static int ixl_tx_setup_offload(struct ixl_queue *que, struct mbuf *mp, u32 *cmd, u32 *off) { struct ether_vlan_header *eh; #ifdef INET struct ip *ip = NULL; #endif struct tcphdr *th = NULL; #ifdef INET6 struct ip6_hdr *ip6; #endif int elen, ip_hlen = 0, tcp_hlen; u16 etype; u8 ipproto = 0; bool tso = FALSE; /* Set up the TSO context descriptor if required */ if (mp->m_pkthdr.csum_flags & CSUM_TSO) { tso = ixl_tso_setup(que, mp); if (tso) ++que->tso; else return (ENXIO); } /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
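*
* Worked example (hedged; a minimal untagged IPv4/TCP frame):
* elen = 14, ip_hlen = 20 and tcp_hlen = 20, so the offload
* offsets computed below encode MACLEN = 14 >> 1 = 7 (2-byte
* words), IPLEN = 20 >> 2 = 5 (4-byte words) and L4LEN =
* 20 >> 2 = 5 (4-byte words); with an 802.1Q tag present, elen
* grows to 18 and MACLEN to 9.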
*/ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); elen = ETHER_HDR_LEN; } switch (etype) { #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + elen); ip_hlen = ip->ip_hl << 2; ipproto = ip->ip_p; th = (struct tcphdr *)((caddr_t)ip + ip_hlen); /* The IP checksum must be recalculated with TSO */ if (tso) *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; else *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; break; #endif #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + elen); ip_hlen = sizeof(struct ip6_hdr); ipproto = ip6->ip6_nxt; th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); *cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; break; #endif default: break; } *off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; *off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; switch (ipproto) { case IPPROTO_TCP: tcp_hlen = th->th_off << 2; if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; *off |= (tcp_hlen >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } break; case IPPROTO_UDP: if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; *off |= (sizeof(struct udphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } break; case IPPROTO_SCTP: if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; *off |= (sizeof(struct sctphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } /* Fall Thru */ default: break; } return (0); } /********************************************************************** * * Setup context for hardware segmentation offload (TSO) * **********************************************************************/ static bool ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp) { struct tx_ring *txr = &que->txr; struct i40e_tx_context_desc *TXD; struct ixl_tx_buf *buf; u32 cmd, mss, type, tsolen; u16 etype; int idx, elen, ip_hlen, tcp_hlen; struct ether_vlan_header *eh; #ifdef INET struct ip *ip; #endif #ifdef INET6 struct ip6_hdr *ip6; #endif #if defined(INET6) || defined(INET) struct tcphdr *th; #endif u64 type_cmd_tso_mss; /* * Determine where frame payload starts. * Jump over vlan headers if already present */ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = eh->evl_proto; } else { elen = ETHER_HDR_LEN; etype = eh->evl_encap_proto; } switch (ntohs(etype)) { #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + elen); if (ip6->ip6_nxt != IPPROTO_TCP) return (ENXIO); ip_hlen = sizeof(struct ip6_hdr); th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); tcp_hlen = th->th_off << 2; /* * The corresponding flag is set by the stack in the IPv4 * TSO case, but not in IPv6 (at least in FreeBSD 10.2). * So, set it here because the rest of the flow requires it. 
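*
* (Hedged note on the effect: with CSUM_TCP_IPV6 set,
* ixl_tx_setup_offload() later ORs I40E_TX_DESC_CMD_L4T_EOFT_TCP
* into the command word, so the hardware recomputes the TCP
* checksum of every segment; the th_sum seeded above is only the
* pseudo-header sum computed with a zero length, which is the
* starting value the device is assumed to expect.)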
*/ mp->m_pkthdr.csum_flags |= CSUM_TCP_IPV6; break; #endif #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + elen); if (ip->ip_p != IPPROTO_TCP) return (ENXIO); ip->ip_sum = 0; ip_hlen = ip->ip_hl << 2; th = (struct tcphdr *)((caddr_t)ip + ip_hlen); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); tcp_hlen = th->th_off << 2; break; #endif default: printf("%s: CSUM_TSO but no supported IP version (0x%04x)", __func__, ntohs(etype)); return FALSE; } /* Ensure we have at least the IP+TCP header in the first mbuf. */ if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr)) return FALSE; idx = txr->next_avail; buf = &txr->buffers[idx]; TXD = (struct i40e_tx_context_desc *) &txr->base[idx]; tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen); type = I40E_TX_DESC_DTYPE_CONTEXT; cmd = I40E_TX_CTX_DESC_TSO; /* TSO MSS must not be less than 64 */ if (mp->m_pkthdr.tso_segsz < IXL_MIN_TSO_MSS) { que->mss_too_small++; mp->m_pkthdr.tso_segsz = IXL_MIN_TSO_MSS; } mss = mp->m_pkthdr.tso_segsz; type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) | ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT); TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss); TXD->tunneling_params = htole32(0); buf->m_head = NULL; buf->eop_index = -1; if (++idx == que->num_tx_desc) idx = 0; txr->avail--; txr->next_avail = idx; return TRUE; } /* * ixl_get_tx_head - Retrieve the value from the * location the HW records its HEAD index */ static inline u32 ixl_get_tx_head(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; void *head = &txr->base[que->num_tx_desc]; return LE32_TO_CPU(*(volatile __le32 *)head); } /********************************************************************** * * Get index of last used descriptor/buffer from hardware, and clean * the descriptors/buffers up to that index. * **********************************************************************/ static bool ixl_txeof_hwb(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; u32 first, last, head, done; struct ixl_tx_buf *buf; struct i40e_tx_desc *tx_desc, *eop_desc; mtx_assert(&txr->mtx, MA_OWNED); #ifdef DEV_NETMAP // XXX todo: implement moderation if (netmap_tx_irq(que->vsi->ifp, que->me)) return FALSE; #endif /* DEV_NETMAP */ /* These are not the descriptors you seek, move along :) */ if (txr->avail == que->num_tx_desc) { atomic_store_rel_32(&txr->watchdog_timer, 0); return FALSE; } first = txr->next_to_clean; buf = &txr->buffers[first]; tx_desc = (struct i40e_tx_desc *)&txr->base[first]; last = buf->eop_index; if (last == -1) return FALSE; eop_desc = (struct i40e_tx_desc *)&txr->base[last]; /* Sync DMA before reading head index from ring */ bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_POSTREAD); /* Get the Head WB value */ head = ixl_get_tx_head(que); /* ** Get the index of the first descriptor ** BEYOND the EOP and call that 'done'. ** I do this so the comparison in the ** inner while loop below can be simple */ if (++last == que->num_tx_desc) last = 0; done = last; /* ** The HEAD index of the ring is written in a ** defined location; this, rather than a done bit, ** is what is used to keep track of what must be ** 'cleaned'.
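**
** Illustration (hedged): as ixl_get_tx_head() above shows, the
** write-back word lives just past the last descriptor, at
** &txr->base[que->num_tx_desc]; e.g. with 1024 descriptors it
** sits at byte offset 1024 * sizeof(struct i40e_tx_desc) into
** the ring's DMA area, and after the POSTREAD sync it holds the
** index of the next descriptor the hardware will process.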
*/ while (first != head) { /* We clean the range of the packet */ while (first != done) { ++txr->avail; if (buf->m_head) { txr->bytes += /* for ITR adjustment */ buf->m_head->m_pkthdr.len; txr->tx_bytes += /* for TX stats */ buf->m_head->m_pkthdr.len; bus_dmamap_sync(buf->tag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(buf->tag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } buf->eop_index = -1; if (++first == que->num_tx_desc) first = 0; buf = &txr->buffers[first]; tx_desc = &txr->base[first]; } ++txr->packets; /* If a packet was successfully cleaned, reset the watchdog timer */ atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG); /* See if there is more work now */ last = buf->eop_index; if (last != -1) { eop_desc = &txr->base[last]; /* Get next done point */ if (++last == que->num_tx_desc) last = 0; done = last; } else break; } bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); txr->next_to_clean = first; /* * If there are no pending descriptors, clear the timeout. */ if (txr->avail == que->num_tx_desc) { atomic_store_rel_32(&txr->watchdog_timer, 0); return FALSE; } return TRUE; } /********************************************************************** * * Use the index kept by the driver and the flag on each descriptor to find * used descriptors/buffers and clean them up for re-use. * * This method of reclaiming descriptors is currently incompatible with * DEV_NETMAP. * * Returns TRUE if there are more descriptors to be cleaned after this * function exits. * **********************************************************************/ static bool ixl_txeof_dwb(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; u32 first, last, done; u32 limit = 256; struct ixl_tx_buf *buf; struct i40e_tx_desc *tx_desc, *eop_desc; mtx_assert(&txr->mtx, MA_OWNED); /* There are no descriptors to clean */ if (txr->avail == que->num_tx_desc) { atomic_store_rel_32(&txr->watchdog_timer, 0); return FALSE; } /* Set starting index/descriptor/buffer */ first = txr->next_to_clean; buf = &txr->buffers[first]; tx_desc = &txr->base[first]; /* * This function operates per-packet -- identifies the start of the * packet and gets the index of the last descriptor of the packet from * it, from eop_index. * * If the last descriptor is marked "done" by the hardware, then all * of the descriptors for the packet are cleaned. */ last = buf->eop_index; if (last == -1) return FALSE; eop_desc = &txr->base[last]; /* Sync DMA before reading from ring */ bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_POSTREAD); /* * Get the index of the first descriptor beyond the EOP and call that * 'done'. Simplifies the comparison for the inner loop below. */ if (++last == que->num_tx_desc) last = 0; done = last; /* * We find the last completed descriptor by examining each * descriptor's status bits to see if it's done. */ do { /* Break if last descriptor in packet isn't marked done */ if ((eop_desc->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK) != I40E_TX_DESC_DTYPE_DESC_DONE) break; /* Clean the descriptors that make up the processed packet */ while (first != done) { /* * If there was a buffer attached to this descriptor, * prevent the adapter from accessing it, and add its * length to the queue's TX stats.
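*
* (Hedged recap of the surrounding loop: a packet is considered
* complete once the hardware rewrites the DTYPE field of its EOP
* descriptor to I40E_TX_DESC_DTYPE_DESC_DONE, which is exactly
* what the check at the top of the do-loop tests; the 'limit'
* counter caps the work at 256 packets per call so a busy ring
* cannot monopolize the caller.)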
*/ if (buf->m_head) { txr->bytes += buf->m_head->m_pkthdr.len; txr->tx_bytes += buf->m_head->m_pkthdr.len; bus_dmamap_sync(buf->tag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(buf->tag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } buf->eop_index = -1; ++txr->avail; if (++first == que->num_tx_desc) first = 0; buf = &txr->buffers[first]; tx_desc = &txr->base[first]; } ++txr->packets; /* If a packet was successfully cleaned, reset the watchdog timer */ atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG); /* * Since buf is the first buffer after the one that was just * cleaned, check if the packet it starts is done, too. */ last = buf->eop_index; if (last != -1) { eop_desc = &txr->base[last]; /* Get next done point */ if (++last == que->num_tx_desc) last = 0; done = last; } else break; } while (--limit); bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); txr->next_to_clean = first; /* * If there are no pending descriptors, clear the watchdog timer. */ if (txr->avail == que->num_tx_desc) { atomic_store_rel_32(&txr->watchdog_timer, 0); return FALSE; } return TRUE; } bool ixl_txeof(struct ixl_queue *que) { struct ixl_vsi *vsi = que->vsi; return (vsi->enable_head_writeback) ? ixl_txeof_hwb(que) : ixl_txeof_dwb(que); } /********************************************************************* * * Refresh mbuf buffers for RX descriptor rings * - now keeps its own state so discards due to resource * exhaustion are unnecessary, if an mbuf cannot be obtained * it just returns, keeping its placeholder, thus it can simply * be recalled to try again. * **********************************************************************/ static void ixl_refresh_mbufs(struct ixl_queue *que, int limit) { struct ixl_vsi *vsi = que->vsi; struct rx_ring *rxr = &que->rxr; bus_dma_segment_t hseg[1]; bus_dma_segment_t pseg[1]; struct ixl_rx_buf *buf; struct mbuf *mh, *mp; int i, j, nsegs, error; bool refreshed = FALSE; i = j = rxr->next_refresh; /* Control the loop with one beyond */ if (++j == que->num_rx_desc) j = 0; while (j != limit) { buf = &rxr->buffers[i]; if (rxr->hdr_split == FALSE) goto no_split; if (buf->m_head == NULL) { mh = m_gethdr(M_NOWAIT, MT_DATA); if (mh == NULL) goto update; } else mh = buf->m_head; mh->m_pkthdr.len = mh->m_len = MHLEN; mh->m_len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->htag, buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: hdr dmamap load" " failure - %d\n", error); m_free(mh); buf->m_head = NULL; goto update; } buf->m_head = mh; bus_dmamap_sync(rxr->htag, buf->hmap, BUS_DMASYNC_PREREAD); rxr->base[i].read.hdr_addr = htole64(hseg[0].ds_addr); no_split: if (buf->m_pack == NULL) { mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxr->mbuf_sz); if (mp == NULL) goto update; } else mp = buf->m_pack; mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->ptag, buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: payload dmamap load" " failure - %d\n", error); m_free(mp); buf->m_pack = NULL; goto update; } buf->m_pack = mp; bus_dmamap_sync(rxr->ptag, buf->pmap, BUS_DMASYNC_PREREAD); rxr->base[i].read.pkt_addr = htole64(pseg[0].ds_addr); /* Used only when doing header split */ rxr->base[i].read.hdr_addr = 0; refreshed = TRUE; /* Next is precalculated */ i = j; rxr->next_refresh = i; if (++j == que->num_rx_desc) j = 0; } update: if (refreshed) /* Update hardware 
tail index */ wr32(vsi->hw, rxr->tail, rxr->next_refresh); return; } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per descriptor, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've defined. * **********************************************************************/ int ixl_allocate_rx_data(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; struct ixl_vsi *vsi = que->vsi; device_t dev = vsi->dev; struct ixl_rx_buf *buf; int i, bsize, error; if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSIZE, /* maxsize */ 1, /* nsegments */ MSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->htag))) { device_printf(dev, "Unable to create RX DMA htag\n"); return (error); } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM16BYTES, /* maxsize */ 1, /* nsegments */ MJUM16BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->ptag))) { device_printf(dev, "Unable to create RX DMA ptag\n"); goto free_rx_htag; } bsize = sizeof(struct ixl_rx_buf) * que->num_rx_desc; if (!(rxr->buffers = (struct ixl_rx_buf *) malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); error = ENOMEM; goto free_rx_ptag; } for (i = 0; i < que->num_rx_desc; i++) { buf = &rxr->buffers[i]; error = bus_dmamap_create(rxr->htag, BUS_DMA_NOWAIT, &buf->hmap); if (error) { device_printf(dev, "Unable to create RX head map\n"); goto free_buffers; } error = bus_dmamap_create(rxr->ptag, BUS_DMA_NOWAIT, &buf->pmap); if (error) { bus_dmamap_destroy(rxr->htag, buf->hmap); device_printf(dev, "Unable to create RX pkt map\n"); goto free_buffers; } } return 0; free_buffers: while (i--) { buf = &rxr->buffers[i]; bus_dmamap_destroy(rxr->ptag, buf->pmap); bus_dmamap_destroy(rxr->htag, buf->hmap); } free(rxr->buffers, M_DEVBUF); rxr->buffers = NULL; free_rx_ptag: bus_dma_tag_destroy(rxr->ptag); rxr->ptag = NULL; free_rx_htag: bus_dma_tag_destroy(rxr->htag); rxr->htag = NULL; return (error); } /********************************************************************* * * (Re)Initialize the queue receive ring and its buffers. 
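*
* (Hedged note: after the replenish loop the RX tail register is
* written with que->num_rx_desc - 1, i.e. every descriptor except
* the one at the software head is handed to the hardware, and
* next_check/next_refresh both restart at 0.)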
* **********************************************************************/ int ixl_init_rx_ring(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; struct ixl_vsi *vsi = que->vsi; #if defined(INET6) || defined(INET) struct ifnet *ifp = vsi->ifp; struct lro_ctrl *lro = &rxr->lro; #endif struct ixl_rx_buf *buf; bus_dma_segment_t pseg[1], hseg[1]; int rsize, nsegs, error = 0; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(que->vsi->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ IXL_RX_LOCK(rxr); #ifdef DEV_NETMAP /* same as in ixl_init_tx_ring() */ slot = netmap_reset(na, NR_RX, que->me, 0); #endif /* DEV_NETMAP */ /* Clear the ring contents */ rsize = roundup2(que->num_rx_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); bzero((void *)rxr->base, rsize); /* Cleanup any existing buffers */ for (int i = 0; i < que->num_rx_desc; i++) { buf = &rxr->buffers[i]; if (buf->m_head != NULL) { bus_dmamap_sync(rxr->htag, buf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, buf->hmap); buf->m_head->m_flags |= M_PKTHDR; m_freem(buf->m_head); } if (buf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, buf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, buf->pmap); buf->m_pack->m_flags |= M_PKTHDR; m_freem(buf->m_pack); } buf->m_head = NULL; buf->m_pack = NULL; } /* header split is off */ rxr->hdr_split = FALSE; /* Now replenish the mbufs */ for (int j = 0; j != que->num_rx_desc; ++j) { struct mbuf *mh, *mp; buf = &rxr->buffers[j]; #ifdef DEV_NETMAP /* * In netmap mode, fill the map and set the buffer * address in the NIC ring, considering the offset * between the netmap and NIC rings (see comment in * ixgbe_setup_transmit_ring() ). No need to allocate * an mbuf, so end the block with a continue; */ if (slot) { int sj = netmap_idx_n2k(na->rx_rings[que->me], j); uint64_t paddr; void *addr; addr = PNMB(na, slot + sj, &paddr); netmap_load_map(na, rxr->dma.tag, buf->pmap, addr); /* Update descriptor and the cached value */ rxr->base[j].read.pkt_addr = htole64(paddr); rxr->base[j].read.hdr_addr = 0; continue; } #endif /* DEV_NETMAP */ /* ** Don't allocate mbufs if not ** doing header split; it's wasteful */ if (rxr->hdr_split == FALSE) goto skip_head; /* First the header */ buf->m_head = m_gethdr(M_NOWAIT, MT_DATA); if (buf->m_head == NULL) { error = ENOBUFS; goto fail; } m_adj(buf->m_head, ETHER_ALIGN); mh = buf->m_head; mh->m_len = mh->m_pkthdr.len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->htag, buf->hmap, buf->m_head, hseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) /* Nothing elegant to do here */ goto fail; bus_dmamap_sync(rxr->htag, buf->hmap, BUS_DMASYNC_PREREAD); /* Update descriptor */ rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr); skip_head: /* Now the payload cluster */ buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxr->mbuf_sz); if (buf->m_pack == NULL) { error = ENOBUFS; goto fail; } mp = buf->m_pack; mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->ptag, buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) goto fail; bus_dmamap_sync(rxr->ptag, buf->pmap, BUS_DMASYNC_PREREAD); /* Update descriptor */ rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr); rxr->base[j].read.hdr_addr = 0; } /* Setup our descriptor indices */ rxr->next_check = 0; rxr->next_refresh = 0; rxr->lro_enabled = FALSE; rxr->split = 0; rxr->bytes = 0; rxr->discard = FALSE; wr32(vsi->hw, rxr->tail, que->num_rx_desc - 1); ixl_flush(vsi->hw); #if defined(INET6) ||
defined(INET) /* ** Now set up the LRO interface: */ if (ifp->if_capenable & IFCAP_LRO) { int err = tcp_lro_init(lro); if (err) { if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me); goto fail; } INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me); rxr->lro_enabled = TRUE; lro->ifp = vsi->ifp; } #endif bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); fail: IXL_RX_UNLOCK(rxr); return (error); } /********************************************************************* * * Free station receive ring data structures * **********************************************************************/ void ixl_free_que_rx(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; struct ixl_rx_buf *buf; /* Cleanup any existing buffers */ if (rxr->buffers != NULL) { for (int i = 0; i < que->num_rx_desc; i++) { buf = &rxr->buffers[i]; /* Free buffers and unload dma maps */ ixl_rx_discard(rxr, i); bus_dmamap_destroy(rxr->htag, buf->hmap); bus_dmamap_destroy(rxr->ptag, buf->pmap); } free(rxr->buffers, M_DEVBUF); rxr->buffers = NULL; } if (rxr->htag != NULL) { bus_dma_tag_destroy(rxr->htag); rxr->htag = NULL; } if (rxr->ptag != NULL) { bus_dma_tag_destroy(rxr->ptag); rxr->ptag = NULL; } } static inline void ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) { #if defined(INET6) || defined(INET) /* * At the moment LRO is only for IPv4/TCP packets, and the TCP checksum * of the packet must have been computed by hardware. Also, the packet * should not have a VLAN tag in its Ethernet header. */ if (rxr->lro_enabled && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { /* * Send to the stack if: ** - LRO not enabled, or ** - no LRO resources, or ** - lro enqueue fails */ if (rxr->lro.lro_cnt != 0) if (tcp_lro_rx(&rxr->lro, m, 0) == 0) return; } #endif IXL_RX_UNLOCK(rxr); (*ifp->if_input)(ifp, m); IXL_RX_LOCK(rxr); } static inline void ixl_rx_discard(struct rx_ring *rxr, int i) { struct ixl_rx_buf *rbuf; KASSERT(rxr != NULL, ("Receive ring pointer cannot be null")); KASSERT(i < rxr->que->num_rx_desc, ("Descriptor index must be less than que->num_rx_desc")); rbuf = &rxr->buffers[i]; /* Free the mbufs in the current chain for the packet */ if (rbuf->fmp != NULL) { bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD); m_freem(rbuf->fmp); rbuf->fmp = NULL; } /* * Free the mbufs for the current descriptor, and let ixl_refresh_mbufs() * assign new mbufs to these. */ if (rbuf->m_head) { bus_dmamap_sync(rxr->htag, rbuf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, rbuf->hmap); m_free(rbuf->m_head); rbuf->m_head = NULL; } if (rbuf->m_pack) { bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, rbuf->pmap); m_free(rbuf->m_pack); rbuf->m_pack = NULL; } } #ifdef RSS /* ** ixl_ptype_to_hash: parse the packet type ** to determine the appropriate hash.
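**
** Illustrative example (hedged): a decoded ptype for a plain
** IPv4/TCP frame maps to M_HASHTYPE_RSS_TCP_IPV4 below, while
** anything the decoder does not recognize (decoded.known == 0)
** degrades to M_HASHTYPE_OPAQUE_HASH, which still lets the stack
** use the 32-bit hash value without trusting its type.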
*/ static inline int ixl_ptype_to_hash(u8 ptype) { struct i40e_rx_ptype_decoded decoded; u8 ex = 0; decoded = decode_rx_desc_ptype(ptype); ex = decoded.outer_frag; if (!decoded.known) return M_HASHTYPE_OPAQUE_HASH; if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2) return M_HASHTYPE_OPAQUE_HASH; /* Note: anything that gets to this point is IP */ if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) { switch (decoded.inner_prot) { case I40E_RX_PTYPE_INNER_PROT_TCP: if (ex) return M_HASHTYPE_RSS_TCP_IPV6_EX; else return M_HASHTYPE_RSS_TCP_IPV6; case I40E_RX_PTYPE_INNER_PROT_UDP: if (ex) return M_HASHTYPE_RSS_UDP_IPV6_EX; else return M_HASHTYPE_RSS_UDP_IPV6; default: if (ex) return M_HASHTYPE_RSS_IPV6_EX; else return M_HASHTYPE_RSS_IPV6; } } if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) { switch (decoded.inner_prot) { case I40E_RX_PTYPE_INNER_PROT_TCP: return M_HASHTYPE_RSS_TCP_IPV4; case I40E_RX_PTYPE_INNER_PROT_UDP: if (ex) return M_HASHTYPE_RSS_UDP_IPV4_EX; else return M_HASHTYPE_RSS_UDP_IPV4; default: return M_HASHTYPE_RSS_IPV4; } } /* We should never get here!! */ return M_HASHTYPE_OPAQUE_HASH; } #endif /* RSS */ /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. * * Return TRUE for more work, FALSE for all clean. *********************************************************************/ bool ixl_rxeof(struct ixl_queue *que, int count) { struct ixl_vsi *vsi = que->vsi; struct rx_ring *rxr = &que->rxr; struct ifnet *ifp = vsi->ifp; #if defined(INET6) || defined(INET) struct lro_ctrl *lro = &rxr->lro; #endif int i, nextp, processed = 0; union i40e_rx_desc *cur; struct ixl_rx_buf *rbuf, *nbuf; IXL_RX_LOCK(rxr); #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, que->me, &count)) { IXL_RX_UNLOCK(rxr); return (FALSE); } #endif /* DEV_NETMAP */ for (i = rxr->next_check; count != 0;) { struct mbuf *sendmp, *mh, *mp; u32 status, error; u16 hlen, plen, vtag; u64 qword; u8 ptype; bool eop; /* Sync the ring. */ bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cur = &rxr->base[i]; qword = le64toh(cur->wb.qword1.status_error_len); status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT; ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) { ++rxr->not_done; break; } if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; count--; sendmp = NULL; nbuf = NULL; cur->wb.qword1.status_error_len = 0; rbuf = &rxr->buffers[i]; mh = rbuf->m_head; mp = rbuf->m_pack; eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)); if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1); else vtag = 0; /* Remove device access to the rx buffers. 
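*
* (Hedged note: the POSTREAD syncs below pair with the PREREAD
* syncs issued when the buffers were posted; on platforms that
* bounce or otherwise stage DMA this is what makes the
* device-written packet data visible to the CPU before the mbuf
* chain is passed up the stack.)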
*/ if (rbuf->m_head != NULL) { bus_dmamap_sync(rxr->htag, rbuf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, rbuf->hmap); } if (rbuf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, rbuf->pmap); } /* ** Make sure bad packets are discarded; ** note that only the EOP descriptor has valid ** error results. */ if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { rxr->desc_errs++; ixl_rx_discard(rxr, i); goto next_desc; } /* Prefetch the next buffer */ if (!eop) { nextp = i + 1; if (nextp == que->num_rx_desc) nextp = 0; nbuf = &rxr->buffers[nextp]; prefetch(nbuf); } /* ** The header mbuf is ONLY used when header ** split is enabled, otherwise we get normal ** behavior, i.e., both header and payload ** are DMA'd into the payload buffer. ** ** Rather than using the fmp/lmp global pointers ** we now keep the head of a packet chain in the ** buffer struct and pass this along from one ** descriptor to the next, until we get EOP. */ if (rxr->hdr_split && (rbuf->fmp == NULL)) { if (hlen > IXL_RX_HDR) hlen = IXL_RX_HDR; mh->m_len = hlen; mh->m_flags |= M_PKTHDR; mh->m_next = NULL; mh->m_pkthdr.len = mh->m_len; /* Null buf pointer so it is refreshed */ rbuf->m_head = NULL; /* ** Check the payload length; this ** could be zero if it's a small ** packet. */ if (plen > 0) { mp->m_len = plen; mp->m_next = NULL; mp->m_flags &= ~M_PKTHDR; mh->m_next = mp; mh->m_pkthdr.len += mp->m_len; /* Null buf pointer so it is refreshed */ rbuf->m_pack = NULL; rxr->split++; } /* ** Now create the forward ** chain so when complete ** we won't have to. */ if (eop == 0) { /* stash the chain head */ nbuf->fmp = mh; /* Make forward chain */ if (plen) mp->m_next = nbuf->m_pack; else mh->m_next = nbuf->m_pack; } else { /* Singlet, prepare to send */ sendmp = mh; if (vtag) { sendmp->m_pkthdr.ether_vtag = vtag; sendmp->m_flags |= M_VLANTAG; } } } else { /* ** Either no header split, or a ** secondary piece of a fragmented ** split packet. */ mp->m_len = plen; /* ** See if there is a stored head ** that determines what we are */ sendmp = rbuf->fmp; rbuf->m_pack = rbuf->fmp = NULL; if (sendmp != NULL) /* secondary frag */ sendmp->m_pkthdr.len += mp->m_len; else { /* first desc of a non-ps chain */ sendmp = mp; sendmp->m_flags |= M_PKTHDR; sendmp->m_pkthdr.len = mp->m_len; } /* Pass the head pointer on */ if (eop == 0) { nbuf->fmp = sendmp; sendmp = NULL; mp->m_next = nbuf->m_pack; } } ++processed; /* Sending this frame? */ if (eop) { sendmp->m_pkthdr.rcvif = ifp; /* gather stats */ rxr->rx_packets++; rxr->rx_bytes += sendmp->m_pkthdr.len; /* capture data for dynamic ITR adjustment */ rxr->packets++; rxr->bytes += sendmp->m_pkthdr.len; /* Set VLAN tag (field only valid in eop desc) */ if (vtag) { sendmp->m_pkthdr.ether_vtag = vtag; sendmp->m_flags |= M_VLANTAG; } if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) ixl_rx_checksum(sendmp, status, error, ptype); #ifdef RSS sendmp->m_pkthdr.flowid = le32toh(cur->wb.qword0.hi_dword.rss); M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype)); #else sendmp->m_pkthdr.flowid = que->msix; M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); #endif } next_desc: bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Advance our pointers to the next descriptor.
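*
* (Hedged recap: the wrap below advances the ring index without
* requiring a power-of-two ring size, and the 'processed == 8'
* check further down re-arms the hardware with fresh mbufs every
* eight consumed descriptors rather than once per call, bounding
* how far the tail pointer lags behind.)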
*/ if (++i == que->num_rx_desc) i = 0; /* Now send to the stack or do LRO */ if (sendmp != NULL) { rxr->next_check = i; ixl_rx_input(rxr, ifp, sendmp, ptype); /* * Update index used in loop in case another * ixl_rxeof() call executes when lock is released */ i = rxr->next_check; } /* Every 8 descriptors we go to refresh mbufs */ if (processed == 8) { ixl_refresh_mbufs(que, i); processed = 0; } } /* Refresh any remaining buf structs */ if (ixl_rx_unrefreshed(que)) ixl_refresh_mbufs(que, i); rxr->next_check = i; #if defined(INET6) || defined(INET) /* * Flush any outstanding LRO work */ #if __FreeBSD_version >= 1100105 tcp_lro_flush_all(lro); #else struct lro_entry *queued; while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif #endif /* defined(INET6) || defined(INET) */ IXL_RX_UNLOCK(rxr); return (FALSE); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of the checksum so that the stack * doesn't spend time verifying it. * *********************************************************************/ static void ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype) { struct i40e_rx_ptype_decoded decoded; decoded = decode_rx_desc_ptype(ptype); /* Errors? */ if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) { mp->m_pkthdr.csum_flags = 0; return; } /* IPv6 with extension headers likely has a bad csum */ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) if (status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) { mp->m_pkthdr.csum_flags = 0; return; } /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data |= htons(0xffff); } return; } #if __FreeBSD_version >= 1100000 uint64_t ixl_get_counter(if_t ifp, ift_counter cnt) { struct ixl_vsi *vsi; vsi = if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_IPACKETS: return (vsi->ipackets); case IFCOUNTER_IERRORS: return (vsi->ierrors); case IFCOUNTER_OPACKETS: return (vsi->opackets); case IFCOUNTER_OERRORS: return (vsi->oerrors); case IFCOUNTER_COLLISIONS: /* Collisions are by standard impossible in 40G/10G Ethernet */ return (0); case IFCOUNTER_IBYTES: return (vsi->ibytes); case IFCOUNTER_OBYTES: return (vsi->obytes); case IFCOUNTER_IMCASTS: return (vsi->imcasts); case IFCOUNTER_OMCASTS: return (vsi->omcasts); case IFCOUNTER_IQDROPS: return (vsi->iqdrops); case IFCOUNTER_OQDROPS: return (vsi->oqdrops); case IFCOUNTER_NOPROTO: return (vsi->noproto); default: return (if_get_counter_default(ifp, cnt)); } } #endif /* * Set the TX and RX ring sizes, falling back to the default for values * outside the supported range. */ void ixl_vsi_setup_rings_size(struct ixl_vsi * vsi, int tx_ring_size, int rx_ring_size) { struct device * dev = vsi->dev; if (tx_ring_size < IXL_MIN_RING || tx_ring_size > IXL_MAX_RING || tx_ring_size % IXL_RING_INCREMENT != 0) { device_printf(dev, "Invalid tx_ring_size value of %d set!\n", tx_ring_size); device_printf(dev, "tx_ring_size must be between %d and %d, " "inclusive, and must be a multiple of %d\n", IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT); device_printf(dev, "Using default value of %d instead\n", IXL_DEFAULT_RING); vsi->num_tx_desc = IXL_DEFAULT_RING; } else vsi->num_tx_desc =
tx_ring_size; if (rx_ring_size < IXL_MIN_RING || rx_ring_size > IXL_MAX_RING || rx_ring_size % IXL_RING_INCREMENT != 0) { device_printf(dev, "Invalid rx_ring_size value of %d set!\n", rx_ring_size); device_printf(dev, "rx_ring_size must be between %d and %d, " "inclusive, and must be a multiple of %d\n", IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT); device_printf(dev, "Using default value of %d instead\n", IXL_DEFAULT_RING); vsi->num_rx_desc = IXL_DEFAULT_RING; } else vsi->num_rx_desc = rx_ring_size; device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", vsi->num_tx_desc, vsi->num_rx_desc); } +void +ixl_vsi_add_queues_stats(struct ixl_vsi * vsi) +{ + char queue_namebuf[IXL_QUEUE_NAME_LEN]; + struct sysctl_oid_list *vsi_list, *queue_list; + struct ixl_queue *queues = vsi->queues; + struct sysctl_oid *queue_node; + struct sysctl_ctx_list *ctx; + struct tx_ring *txr; + struct rx_ring *rxr; + + vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); + ctx = &vsi->sysctl_ctx; + + /* Queue statistics */ + for (int q = 0; q < vsi->num_queues; q++) { + snprintf(queue_namebuf, IXL_QUEUE_NAME_LEN, "que%d", q); + queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, + OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #"); + queue_list = SYSCTL_CHILDREN(queue_node); + + txr = &(queues[q].txr); + rxr = &(queues[q].rxr); + + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", + CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), + "m_defrag() failed"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", + CTLFLAG_RD, &(queues[q].irqs), + "irqs on this queue"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", + CTLFLAG_RD, &(queues[q].tso), + "TSO"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed", + CTLFLAG_RD, &(queues[q].tx_dmamap_failed), + "Driver tx dma failure in xmit"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small", + CTLFLAG_RD, &(queues[q].mss_too_small), + "TSO sends with an MSS less than 64"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", + CTLFLAG_RD, &(txr->no_desc), + "Queue No Descriptor Available"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", + CTLFLAG_RD, &(txr->total_packets), + "Queue Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", + CTLFLAG_RD, &(txr->tx_bytes), + "Queue Bytes Transmitted"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", + CTLFLAG_RD, &(rxr->rx_packets), + "Queue Packets Received"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + CTLFLAG_RD, &(rxr->rx_bytes), + "Queue Bytes Received"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err", + CTLFLAG_RD, &(rxr->desc_errs), + "Queue Rx Descriptor Errors"); + SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", + CTLFLAG_RD, &(rxr->itr), 0, + "Queue Rx ITR Interval"); + SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", + CTLFLAG_RD, &(txr->itr), 0, + "Queue Tx ITR Interval"); +#ifdef IXL_DEBUG + SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog", + CTLFLAG_RD, &(txr->watchdog_timer), 0, + "Ticks before watchdog timer causes interface reinit"); + SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail", + CTLFLAG_RD, &(txr->next_avail), 0, + "Next TX descriptor to be used"); + SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean", + CTLFLAG_RD, &(txr->next_to_clean), 0, + "Next TX descriptor to be cleaned"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done", + CTLFLAG_RD, &(rxr->not_done), + "Queue Rx Descriptors not Done"); + SYSCTL_ADD_UINT(ctx, 
queue_list, OID_AUTO, "rx_next_refresh", + CTLFLAG_RD, &(rxr->next_refresh), 0, + "Next RX descriptor to be refreshed"); + SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check", + CTLFLAG_RD, &(rxr->next_check), 0, + "Next RX descriptor to be checked"); + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail", + CTLTYPE_UINT | CTLFLAG_RD, &queues[q], + sizeof(struct ixl_queue), + ixl_sysctl_qrx_tail_handler, "IU", + "Queue Receive Descriptor Tail"); + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail", + CTLTYPE_UINT | CTLFLAG_RD, &queues[q], + sizeof(struct ixl_queue), + ixl_sysctl_qtx_tail_handler, "IU", + "Queue Transmit Descriptor Tail"); +#endif + } + +} + +void +ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, + struct sysctl_oid_list *child, + struct i40e_eth_stats *eth_stats) +{ + struct ixl_sysctl_info ctls[] = + { + {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, + {&eth_stats->rx_unicast, "ucast_pkts_rcvd", + "Unicast Packets Received"}, + {&eth_stats->rx_multicast, "mcast_pkts_rcvd", + "Multicast Packets Received"}, + {&eth_stats->rx_broadcast, "bcast_pkts_rcvd", + "Broadcast Packets Received"}, + {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"}, + {&eth_stats->rx_unknown_protocol, "rx_unknown_proto", + "RX unknown protocol packets"}, + {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"}, + {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, + {&eth_stats->tx_multicast, "mcast_pkts_txd", + "Multicast Packets Transmitted"}, + {&eth_stats->tx_broadcast, "bcast_pkts_txd", + "Broadcast Packets Transmitted"}, + {&eth_stats->tx_errors, "tx_errors", "TX packet errors"}, + // end + {0,0,0} + }; + + struct ixl_sysctl_info *entry = ctls; + + while (entry->stat != 0) + { + SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, + CTLFLAG_RD, entry->stat, + entry->description); + entry++; + } +} + +#ifdef IXL_DEBUG +/** + * ixl_sysctl_qtx_tail_handler + * Retrieves I40E_QTX_TAIL value from hardware + * for a sysctl. + */ +static int +ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS) +{ + struct ixl_queue *que; + int error; + u32 val; + + que = ((struct ixl_queue *)oidp->oid_arg1); + if (!que) return 0; + + val = rd32(que->vsi->hw, que->txr.tail); + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return error; + return (0); +} + +/** + * ixl_sysctl_qrx_tail_handler + * Retrieves I40E_QRX_TAIL value from hardware + * for a sysctl. + */ +static int +ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS) +{ + struct ixl_queue *que; + int error; + u32 val; + + que = ((struct ixl_queue *)oidp->oid_arg1); + if (!que) return 0; + + val = rd32(que->vsi->hw, que->rxr.tail); + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return error; + return (0); +} +#endif + static void ixl_queue_sw_irq(struct ixl_vsi *vsi, int qidx) { struct i40e_hw *hw = vsi->hw; u32 reg, mask; - if ((vsi->flags & IXL_FLAGS_IS_VF) != 0) { + if (IXL_VSI_IS_PF(vsi)) { + mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); + + reg = ((vsi->flags & IXL_FLAGS_USES_MSIX) != 0) ?
+ I40E_PFINT_DYN_CTLN(qidx) : I40E_PFINT_DYN_CTL0; + } else { mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK | I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); reg = I40E_VFINT_DYN_CTLN1(qidx); - } else { - mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | - I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); - - reg = ((vsi->flags & IXL_FLAGS_USES_MSIX) != 0) ? - I40E_PFINT_DYN_CTLN(qidx) : I40E_PFINT_DYN_CTL0; } wr32(hw, reg, mask); } int ixl_queue_hang_check(struct ixl_vsi *vsi) { struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; struct tx_ring *txr; s32 timer, new_timer; int hung = 0; for (int i = 0; i < vsi->num_queues; i++, que++) { txr = &que->txr; /* * If watchdog_timer is equal to the default value set by ixl_txeof * just subtract hz and move on - the queue is most probably * running. Otherwise check the value. */ if (atomic_cmpset_rel_32(&txr->watchdog_timer, IXL_WATCHDOG, (IXL_WATCHDOG) - hz) == 0) { timer = atomic_load_acq_32(&txr->watchdog_timer); /* * Again - if the timer was reset to the default value * then the queue is running. Otherwise check if the watchdog * expired and act accordingly. */ if (timer > 0 && timer != IXL_WATCHDOG) { new_timer = timer - hz; if (new_timer <= 0) { atomic_store_rel_32(&txr->watchdog_timer, -1); device_printf(dev, "WARNING: queue %d " "appears to be hung!\n", que->me); ++hung; /* Try to unblock the queue with SW IRQ */ ixl_queue_sw_irq(vsi, i); } else { /* * If this fails, that means something in the TX path * has updated the watchdog, so it means the TX path * is still working and the watchdog doesn't need * to count down. */ atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer); } } } } return (hung); } Index: releng/11.3/sys/dev/ixl/ixlv.h =================================================================== --- releng/11.3/sys/dev/ixl/ixlv.h (revision 349180) +++ releng/11.3/sys/dev/ixl/ixlv.h (revision 349181) @@ -1,238 +1,237 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXLV_H_ #define _IXLV_H_ #include "ixlv_vc_mgr.h" #define IXLV_AQ_MAX_ERR 30 #define IXLV_MAX_INIT_WAIT 120 #define IXLV_MAX_FILTERS 128 #define IXLV_MAX_QUEUES 16 #define IXLV_AQ_TIMEOUT (1 * hz) #define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */ #define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0) #define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) #define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) #define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) #define IXLV_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) #define IXLV_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) #define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) #define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) #define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) #define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9) #define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10) #define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11) #define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12) #define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13) #define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14) /* printf %b arg */ #define IXLV_FLAGS \ "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \ "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \ "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \ "\12CONFIGURE_PROMISC\13GET_STATS" #define IXLV_PRINTF_VF_OFFLOAD_FLAGS \ "\20\1I40E_VIRTCHNL_VF_OFFLOAD_L2" \ "\2I40E_VIRTCHNL_VF_OFFLOAD_IWARP" \ "\3I40E_VIRTCHNL_VF_OFFLOAD_FCOE" \ "\4I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ" \ "\5I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG" \ "\6I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR" \ "\21I40E_VIRTCHNL_VF_OFFLOAD_VLAN" \ "\22I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING" \ "\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \ "\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF" /* Driver state */ enum ixlv_state_t { IXLV_START, IXLV_FAILED, IXLV_RESET_REQUIRED, IXLV_RESET_PENDING, IXLV_VERSION_CHECK, IXLV_GET_RESOURCES, IXLV_INIT_READY, IXLV_INIT_START, IXLV_INIT_CONFIG, IXLV_INIT_MAPPING, IXLV_INIT_ENABLE, IXLV_INIT_COMPLETE, IXLV_RUNNING, }; /* Structs */ struct ixlv_mac_filter { SLIST_ENTRY(ixlv_mac_filter) next; u8 macaddr[ETHER_ADDR_LEN]; u16 flags; }; SLIST_HEAD(mac_list, ixlv_mac_filter); struct ixlv_vlan_filter { SLIST_ENTRY(ixlv_vlan_filter) next; u16 vlan; u16 flags; }; SLIST_HEAD(vlan_list, ixlv_vlan_filter); /* Software controller structure */ struct ixlv_sc { struct i40e_hw hw; struct i40e_osdep osdep; struct device *dev; struct resource *pci_mem; struct resource *msix_mem; enum ixlv_state_t init_state; int init_in_progress; /* * Interrupt resources */ void *tag; struct resource *res; /* For the AQ */ struct ifmedia media; struct callout timer; int msix; int pf_version; int if_flags; bool link_up; enum virtchnl_link_speed link_speed; struct mtx mtx; - u32 qbase; u32 admvec; struct timeout_task timeout; struct task aq_irq; struct task aq_sched; struct taskqueue *tq; struct ixl_vsi vsi; /* Filter lists */ struct mac_list *mac_filters; struct vlan_list *vlan_filters; /* Promiscuous mode */ u32 promiscuous_flags; 
/* Admin queue task flags */ u32 aq_wait_count; struct ixl_vc_mgr vc_mgr; struct ixl_vc_cmd add_mac_cmd; struct ixl_vc_cmd del_mac_cmd; struct ixl_vc_cmd config_queues_cmd; struct ixl_vc_cmd map_vectors_cmd; struct ixl_vc_cmd enable_queues_cmd; struct ixl_vc_cmd add_vlan_cmd; struct ixl_vc_cmd del_vlan_cmd; struct ixl_vc_cmd add_multi_cmd; struct ixl_vc_cmd del_multi_cmd; struct ixl_vc_cmd config_rss_key_cmd; struct ixl_vc_cmd get_rss_hena_caps_cmd; struct ixl_vc_cmd set_rss_hena_cmd; struct ixl_vc_cmd config_rss_lut_cmd; /* Virtual comm channel */ struct virtchnl_vf_resource *vf_res; struct virtchnl_vsi_resource *vsi_res; /* Misc stats maintained by the driver */ u64 watchdog_events; u64 admin_irq; u8 aq_buffer[IXL_AQ_BUF_SZ]; }; #define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED) /* ** This checks for a zero MAC address, which is likely ** unless the Admin on the Host has created one. */ static inline bool ixlv_check_ether_addr(u8 *addr) { bool status = TRUE; if ((addr[0] == 0 && addr[1] == 0 && addr[2] == 0 && addr[3] == 0 && addr[4] == 0 && addr[5] == 0)) status = FALSE; return (status); } /* ** VF Common function prototypes */ int ixlv_send_api_ver(struct ixlv_sc *); int ixlv_verify_api_ver(struct ixlv_sc *); int ixlv_send_vf_config_msg(struct ixlv_sc *); int ixlv_get_vf_config(struct ixlv_sc *); void ixlv_init(void *); int ixlv_reinit_locked(struct ixlv_sc *); void ixlv_configure_queues(struct ixlv_sc *); void ixlv_enable_queues(struct ixlv_sc *); void ixlv_disable_queues(struct ixlv_sc *); void ixlv_map_queues(struct ixlv_sc *); void ixlv_enable_intr(struct ixl_vsi *); void ixlv_disable_intr(struct ixl_vsi *); void ixlv_add_ether_filters(struct ixlv_sc *); void ixlv_del_ether_filters(struct ixlv_sc *); void ixlv_request_stats(struct ixlv_sc *); void ixlv_request_reset(struct ixlv_sc *); void ixlv_vc_completion(struct ixlv_sc *, enum virtchnl_ops, enum virtchnl_status_code, u8 *, u16); void ixlv_add_ether_filter(struct ixlv_sc *); void ixlv_add_vlans(struct ixlv_sc *); void ixlv_del_vlans(struct ixlv_sc *); void ixlv_update_stats_counters(struct ixlv_sc *, struct i40e_eth_stats *); void ixlv_update_link_status(struct ixlv_sc *); void ixlv_get_default_rss_key(u32 *, bool); void ixlv_config_rss_key(struct ixlv_sc *); void ixlv_set_rss_hena(struct ixlv_sc *); void ixlv_config_rss_lut(struct ixlv_sc *); #endif /* _IXLV_H_ */ Index: releng/11.3/sys/dev/ixl/ixlv_vc_mgr.h =================================================================== --- releng/11.3/sys/dev/ixl/ixlv_vc_mgr.h (revision 349180) +++ releng/11.3/sys/dev/ixl/ixlv_vc_mgr.h (revision 349181) @@ -1,76 +1,76 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
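[Editor's note: ixlv_check_ether_addr spells out all six byte comparisons. An equivalent formulation — shown only as a sketch, not a proposed driver change — compares against an all-zero template with memcmp:]

#include <stdbool.h>
#include <string.h>

#define ETHER_ADDR_LEN 6

static inline bool
mac_is_assigned(const unsigned char *addr)
{
	static const unsigned char zero[ETHER_ADDR_LEN];

	/* false for 00:00:00:00:00:00, i.e. no MAC assigned yet */
	return (memcmp(addr, zero, ETHER_ADDR_LEN) != 0);
}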
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXLV_VC_MGR_H_ #define _IXLV_VC_MGR_H_ #include <sys/queue.h> struct ixl_vc_cmd; typedef void ixl_vc_callback_t(struct ixl_vc_cmd *, void *, enum i40e_status_code); #define IXLV_VC_CMD_FLAG_BUSY 0x0001 struct ixl_vc_cmd { uint32_t request; uint32_t flags; ixl_vc_callback_t *callback; void *arg; TAILQ_ENTRY(ixl_vc_cmd) next; }; struct ixl_vc_mgr { struct ixlv_sc *sc; struct ixl_vc_cmd *current; struct callout callout; TAILQ_HEAD(, ixl_vc_cmd) pending; }; #define IXLV_VC_TIMEOUT (2 * hz) void ixl_vc_init_mgr(struct ixlv_sc *, struct ixl_vc_mgr *); void ixl_vc_enqueue(struct ixl_vc_mgr *, struct ixl_vc_cmd *, uint32_t, ixl_vc_callback_t *, void *); void ixl_vc_flush(struct ixl_vc_mgr *mgr); #endif Index: releng/11.3/sys/dev/ixl/ixlvc.c =================================================================== --- releng/11.3/sys/dev/ixl/ixlvc.c (revision 349180) +++ releng/11.3/sys/dev/ixl/ixlvc.c (revision 349181) @@ -1,1258 +1,1258 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. - + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
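[Editor's note: the vc manager above keeps one command in flight (current) and parks the rest on a TAILQ. For readers unfamiliar with <sys/queue.h>, here is a self-contained sketch of the enqueue/dequeue half of that design, with a hypothetical struct cmd:]

#include <stdlib.h>
#include <sys/queue.h>

struct cmd {
	int request;
	TAILQ_ENTRY(cmd) next;		/* linkage used by the TAILQ macros */
};

TAILQ_HEAD(cmd_list, cmd);

int main(void)
{
	struct cmd_list pending = TAILQ_HEAD_INITIALIZER(pending);
	struct cmd *c = calloc(1, sizeof(*c));

	if (c == NULL)
		return (1);
	c->request = 42;
	TAILQ_INSERT_TAIL(&pending, c, next);		/* enqueue */

	struct cmd *cur = TAILQ_FIRST(&pending);	/* pick next to send */
	TAILQ_REMOVE(&pending, cur, next);
	free(cur);
	return (0);
}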
******************************************************************************/ /*$FreeBSD$*/ /* ** Virtual Channel support ** These are support functions for communication ** between the VF and PF drivers. */ #include "ixl.h" #include "ixlv.h" #include "i40e_prototype.h" /* busy wait delay in msec */ #define IXLV_BUSY_WAIT_DELAY 10 #define IXLV_BUSY_WAIT_COUNT 50 static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t, enum virtchnl_status_code); static void ixl_vc_process_next(struct ixl_vc_mgr *mgr); static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr); static void ixl_vc_send_current(struct ixl_vc_mgr *mgr); #ifdef IXL_DEBUG /* ** Validate VF messages */ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode, u8 *msg, u16 msglen) { bool err_msg_format = false; int valid_len; /* Validate message length. */ switch (v_opcode) { case VIRTCHNL_OP_VERSION: valid_len = sizeof(struct virtchnl_version_info); break; case VIRTCHNL_OP_RESET_VF: valid_len = 0; break; case VIRTCHNL_OP_GET_VF_RESOURCES: /* Valid length in api v1.0 is 0, v1.1 is 4 */ valid_len = 4; break; case VIRTCHNL_OP_CONFIG_TX_QUEUE: valid_len = sizeof(struct virtchnl_txq_info); break; case VIRTCHNL_OP_CONFIG_RX_QUEUE: valid_len = sizeof(struct virtchnl_rxq_info); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: valid_len = sizeof(struct virtchnl_vsi_queue_config_info); if (msglen >= valid_len) { struct virtchnl_vsi_queue_config_info *vqc = (struct virtchnl_vsi_queue_config_info *)msg; valid_len += (vqc->num_queue_pairs * sizeof(struct virtchnl_queue_pair_info)); if (vqc->num_queue_pairs == 0) err_msg_format = true; } break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: valid_len = sizeof(struct virtchnl_irq_map_info); if (msglen >= valid_len) { struct virtchnl_irq_map_info *vimi = (struct virtchnl_irq_map_info *)msg; valid_len += (vimi->num_vectors * sizeof(struct virtchnl_vector_map)); if (vimi->num_vectors == 0) err_msg_format = true; } break; case VIRTCHNL_OP_ENABLE_QUEUES: case VIRTCHNL_OP_DISABLE_QUEUES: valid_len = sizeof(struct virtchnl_queue_select); break; case VIRTCHNL_OP_ADD_ETH_ADDR: case VIRTCHNL_OP_DEL_ETH_ADDR: valid_len = sizeof(struct virtchnl_ether_addr_list); if (msglen >= valid_len) { struct virtchnl_ether_addr_list *veal = (struct virtchnl_ether_addr_list *)msg; valid_len += veal->num_elements * sizeof(struct virtchnl_ether_addr); if (veal->num_elements == 0) err_msg_format = true; } break; case VIRTCHNL_OP_ADD_VLAN: case VIRTCHNL_OP_DEL_VLAN: valid_len = sizeof(struct virtchnl_vlan_filter_list); if (msglen >= valid_len) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; valid_len += vfl->num_elements * sizeof(u16); if (vfl->num_elements == 0) err_msg_format = true; } break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: valid_len = sizeof(struct virtchnl_promisc_info); break; case VIRTCHNL_OP_GET_STATS: valid_len = sizeof(struct virtchnl_queue_select); break; /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: default: return EPERM; break; } /* a few more checks */ if ((valid_len != msglen) || (err_msg_format)) return EINVAL; else return 0; } #endif /* ** ixlv_send_pf_msg ** ** Send a message to the PF and print the status on failure.
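[Editor's note: the validation pattern in ixl_vc_validate_vf_msg — fixed header size plus num_elements times the element size, rejecting empty lists — applies to every list-shaped virtchnl message. A sketch with a hypothetical list layout; note that the [1] trailing array leaves one element of slack, matching how the driver sizes its buffers:]

#include <stddef.h>
#include <stdint.h>

struct elem { uint16_t id; };
struct list_msg {
	uint16_t num_elements;
	struct elem list[1];		/* variable-length trailing array */
};

/* Returns 0 if len matches the declared element count, -1 otherwise. */
static int validate_list(const struct list_msg *m, size_t len)
{
	if (len < sizeof(*m) || m->num_elements == 0)
		return (-1);
	/* header already includes one element of slack via list[1] */
	size_t want = sizeof(*m) +
	    (size_t)m->num_elements * sizeof(struct elem);
	return (len == want ? 0 : -1);
}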
*/ static int ixlv_send_pf_msg(struct ixlv_sc *sc, enum virtchnl_ops op, u8 *msg, u16 len) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; i40e_status err; #ifdef IXL_DEBUG /* ** Pre-validating messages to the PF */ int val_err; val_err = ixl_vc_validate_vf_msg(sc, op, msg, len); if (val_err) device_printf(dev, "Error validating msg to PF for op %d," " msglen %d: error %d\n", op, len, val_err); #endif err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL); if (err) device_printf(dev, "Unable to send opcode %s to PF, " "status %s, aq error %s\n", ixl_vc_opcode_str(op), i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); return err; } /* ** ixlv_send_api_ver ** ** Send API version admin queue message to the PF. The reply is not checked ** in this function. Returns 0 if the message was successfully ** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. */ int ixlv_send_api_ver(struct ixlv_sc *sc) { struct virtchnl_version_info vvi; vvi.major = VIRTCHNL_VERSION_MAJOR; vvi.minor = VIRTCHNL_VERSION_MINOR; return ixlv_send_pf_msg(sc, VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi)); } /* ** ixlv_verify_api_ver ** ** Compare API versions with the PF. Must be called after admin queue is ** initialized. Returns 0 if API versions match, EIO if ** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty. */ int ixlv_verify_api_ver(struct ixlv_sc *sc) { struct virtchnl_version_info *pf_vvi; struct i40e_hw *hw = &sc->hw; struct i40e_arq_event_info event; device_t dev = sc->dev; i40e_status err; int retries = 0; event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); if (!event.msg_buf) { err = ENOMEM; goto out; } for (;;) { if (++retries > IXLV_AQ_MAX_ERR) goto out_alloc; /* Initial delay here is necessary */ i40e_msec_pause(100); err = i40e_clean_arq_element(hw, &event, NULL); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) continue; else if (err) { err = EIO; goto out_alloc; } if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != VIRTCHNL_OP_VERSION) { DDPRINTF(dev, "Received unexpected op response: %d\n", le32toh(event.desc.cookie_high)); /* Don't stop looking for expected response */ continue; } err = (i40e_status)le32toh(event.desc.cookie_low); if (err) { err = EIO; goto out_alloc; } else break; } pf_vvi = (struct virtchnl_version_info *)event.msg_buf; if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) { device_printf(dev, "Critical PF/VF API version mismatch!\n"); err = EIO; } else sc->pf_version = pf_vvi->minor; /* Log PF/VF api versions */ device_printf(dev, "PF API %d.%d / VF API %d.%d\n", pf_vvi->major, pf_vvi->minor, VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); out_alloc: free(event.msg_buf, M_DEVBUF); out: return (err); } /* ** ixlv_send_vf_config_msg ** ** Send VF configuration request admin queue message to the PF. The reply ** is not checked in this function. Returns 0 if the message was ** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. */ int ixlv_send_vf_config_msg(struct ixlv_sc *sc) { u32 caps; caps = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_VLAN; if (sc->pf_version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, NULL, 0); else return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, (u8 *)&caps, sizeof(caps)); } /* ** ixlv_get_vf_config ** ** Get VF configuration from PF and populate hw structure. 
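[Editor's note: ixlv_verify_api_ver is the canonical shape of synchronous use of the asynchronous admin queue: delay, poll i40e_clean_arq_element, skip unrelated responses, and give up after a bounded number of rounds. The control flow, reduced to a sketch with a hypothetical poll callback:]

#include <errno.h>

#define MAX_RETRIES 30

/* poll() returns 0 with a message, EAGAIN when the queue is empty. */
static int wait_for_response(int (*poll)(int *opcode), int expected_opcode)
{
	int opcode;

	for (int tries = 0; tries < MAX_RETRIES; tries++) {
		/* a real driver sleeps here, e.g. i40e_msec_pause(100) */
		int err = poll(&opcode);
		if (err == EAGAIN)
			continue;		/* nothing yet, retry */
		if (err != 0)
			return (EIO);		/* hard queue error */
		if (opcode != expected_opcode)
			continue;		/* unrelated message, keep looking */
		return (0);
	}
	return (ETIMEDOUT);
}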
Must be called after ** admin queue is initialized. Busy waits until response is received from PF, ** with maximum timeout. Response from PF is returned in the buffer for further ** processing by the caller. */ int ixlv_get_vf_config(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; struct i40e_arq_event_info event; u16 len; i40e_status err = 0; u32 retries = 0; /* Note this assumes a single VSI */ len = sizeof(struct virtchnl_vf_resource) + sizeof(struct virtchnl_vsi_resource); event.buf_len = len; event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); if (!event.msg_buf) { err = ENOMEM; goto out; } for (;;) { err = i40e_clean_arq_element(hw, &event, NULL); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { if (++retries <= IXLV_AQ_MAX_ERR) i40e_msec_pause(10); } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != VIRTCHNL_OP_GET_VF_RESOURCES) { DDPRINTF(dev, "Received a response from PF," " opcode %d, error %d", le32toh(event.desc.cookie_high), le32toh(event.desc.cookie_low)); retries++; continue; } else { err = (i40e_status)le32toh(event.desc.cookie_low); if (err) { device_printf(dev, "%s: Error returned from PF," " opcode %d, error %d\n", __func__, le32toh(event.desc.cookie_high), le32toh(event.desc.cookie_low)); err = EIO; goto out_alloc; } /* We retrieved the config message, with no errors */ break; } if (retries > IXLV_AQ_MAX_ERR) { INIT_DBG_DEV(dev, "Did not receive response after %d tries.", retries); err = ETIMEDOUT; goto out_alloc; } } memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len)); i40e_vf_parse_hw_config(hw, sc->vf_res); out_alloc: free(event.msg_buf, M_DEVBUF); out: return err; } /* ** ixlv_configure_queues ** ** Request that the PF set up our queues. */ void ixlv_configure_queues(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi = &sc->vsi; struct ixl_queue *que = vsi->queues; struct tx_ring *txr; struct rx_ring *rxr; int len, pairs; struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_queue_pair_info *vqpi; pairs = vsi->num_queues; len = sizeof(struct virtchnl_vsi_queue_config_info) + (sizeof(struct virtchnl_queue_pair_info) * pairs); vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!vqci) { device_printf(dev, "%s: unable to allocate memory\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } vqci->vsi_id = sc->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; vqpi = vqci->qpair; /* Size check is not needed here - HW max is 16 queue pairs, and we * can fit info for 31 of them into the AQ buffer before it overflows. */ for (int i = 0; i < pairs; i++, que++, vqpi++) { txr = &que->txr; rxr = &que->rxr; vqpi->txq.vsi_id = vqci->vsi_id; vqpi->txq.queue_id = i; vqpi->txq.ring_len = que->num_tx_desc; vqpi->txq.dma_ring_addr = txr->dma.pa; /* Enable Head writeback */ if (vsi->enable_head_writeback) { vqpi->txq.headwb_enabled = 1; vqpi->txq.dma_headwb_addr = txr->dma.pa + (que->num_tx_desc * sizeof(struct i40e_tx_desc)); } vqpi->rxq.vsi_id = vqci->vsi_id; vqpi->rxq.queue_id = i; vqpi->rxq.ring_len = que->num_rx_desc; vqpi->rxq.dma_ring_addr = rxr->dma.pa; vqpi->rxq.max_pkt_size = vsi->max_frame_size; vqpi->rxq.databuffer_size = rxr->mbuf_sz; vqpi->rxq.splithdr_enabled = 0; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES, (u8 *)vqci, len); free(vqci, M_DEVBUF); } /* ** ixlv_enable_queues ** ** Request that the PF enable all of our queues. 
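[Editor's note: ixlv_configure_queues sizes one buffer for the header plus one virtchnl_queue_pair_info per queue, then advances a cursor pointer (vqpi) over the trailing array. The same allocate-and-fill idiom, as a sketch with simplified hypothetical types:]

#include <stdint.h>
#include <stdlib.h>

struct pair { uint16_t txq, rxq; };
struct cfg {
	uint16_t vsi_id;
	uint16_t num_pairs;
	struct pair qpair[1];		/* trailing variable-length array */
};

static struct cfg *build_cfg(uint16_t vsi_id, int pairs, size_t *lenp)
{
	size_t len = sizeof(struct cfg) + sizeof(struct pair) * pairs;
	struct cfg *c = calloc(1, len);

	if (c == NULL)
		return (NULL);
	c->vsi_id = vsi_id;
	c->num_pairs = pairs;

	struct pair *p = c->qpair;	/* cursor over the trailing array */
	for (int i = 0; i < pairs; i++, p++) {
		p->txq = i;
		p->rxq = i;
	}
	*lenp = len;			/* caller sends len bytes, then frees */
	return (c);
}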
*/ void ixlv_enable_queues(struct ixlv_sc *sc) { struct virtchnl_queue_select vqs; vqs.vsi_id = sc->vsi_res->vsi_id; vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; vqs.rx_queues = vqs.tx_queues; ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); } /* ** ixlv_disable_queues ** ** Request that the PF disable all of our queues. */ void ixlv_disable_queues(struct ixlv_sc *sc) { struct virtchnl_queue_select vqs; vqs.vsi_id = sc->vsi_res->vsi_id; vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; vqs.rx_queues = vqs.tx_queues; ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); } /* ** ixlv_map_queues ** ** Request that the PF map queues to interrupt vectors. Misc causes, including ** admin queue, are always mapped to vector 0. */ void ixlv_map_queues(struct ixlv_sc *sc) { struct virtchnl_irq_map_info *vm; int i, q, len; struct ixl_vsi *vsi = &sc->vsi; struct ixl_queue *que = vsi->queues; /* How many queue vectors, adminq uses one */ q = sc->msix - 1; len = sizeof(struct virtchnl_irq_map_info) + (sc->msix * sizeof(struct virtchnl_vector_map)); vm = malloc(len, M_DEVBUF, M_NOWAIT); if (!vm) { printf("%s: unable to allocate memory\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } vm->num_vectors = sc->msix; /* Queue vectors first */ for (i = 0; i < q; i++, que++) { vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; vm->vecmap[i].vector_id = i + 1; /* first is adminq */ vm->vecmap[i].txq_map = (1 << que->me); vm->vecmap[i].rxq_map = (1 << que->me); vm->vecmap[i].rxitr_idx = 0; vm->vecmap[i].txitr_idx = 1; } /* Misc vector last - this is only for AdminQ messages */ vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; vm->vecmap[i].vector_id = 0; vm->vecmap[i].txq_map = 0; vm->vecmap[i].rxq_map = 0; vm->vecmap[i].rxitr_idx = 0; vm->vecmap[i].txitr_idx = 0; ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP, (u8 *)vm, len); free(vm, M_DEVBUF); } /* ** Scan the Filter List looking for vlans that need ** to be added, then create the data to hand to the AQ ** for handling. */ void ixlv_add_vlans(struct ixlv_sc *sc) { struct virtchnl_vlan_filter_list *v; struct ixlv_vlan_filter *f, *ftmp; device_t dev = sc->dev; int len, i = 0, cnt = 0; /* Get count of VLAN filters to add */ SLIST_FOREACH(f, sc->vlan_filters, next) { if (f->flags & IXL_FILTER_ADD) cnt++; } if (!cnt) { /* no work... */ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, VIRTCHNL_STATUS_SUCCESS); return; } len = sizeof(struct virtchnl_vlan_filter_list) + (cnt * sizeof(u16)); if (len > IXL_AQ_BUF_SZ) { device_printf(dev, "%s: Exceeded Max AQ Buf size\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } v = malloc(len, M_DEVBUF, M_NOWAIT); if (!v) { device_printf(dev, "%s: unable to allocate memory\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } v->vsi_id = sc->vsi_res->vsi_id; v->num_elements = cnt; /* Scan the filter array */ SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { if (f->flags & IXL_FILTER_ADD) { bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); f->flags = IXL_FILTER_USED; i++; } if (i == cnt) break; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); free(v, M_DEVBUF); /* add stats? */ } /* ** Scan the Filter Table looking for vlans that need ** to be removed, then create the data to hand to the AQ ** for handling. 
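[Editor's note: both the enable and disable requests select every queue with the dense bitmap (1 << num_queue_pairs) - 1. A two-function sketch; the assert records why the shift is safe here — the VF is capped at 16 queues (IXLV_MAX_QUEUES), and a 32-bit shift by 32 would be undefined behavior:]

#include <assert.h>
#include <stdint.h>

static uint32_t all_queues_mask(unsigned int num_queue_pairs)
{
	assert(num_queue_pairs <= 16);	/* IXLV_MAX_QUEUES */
	return (((uint32_t)1 << num_queue_pairs) - 1);
}
/* all_queues_mask(4) == 0xF: queues 0..3 selected */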
*/ void ixlv_del_vlans(struct ixlv_sc *sc) { device_t dev = sc->dev; struct virtchnl_vlan_filter_list *v; struct ixlv_vlan_filter *f, *ftmp; int len, i = 0, cnt = 0; /* Get count of VLAN filters to delete */ SLIST_FOREACH(f, sc->vlan_filters, next) { if (f->flags & IXL_FILTER_DEL) cnt++; } if (!cnt) { /* no work... */ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, VIRTCHNL_STATUS_SUCCESS); return; } len = sizeof(struct virtchnl_vlan_filter_list) + (cnt * sizeof(u16)); if (len > IXL_AQ_BUF_SZ) { device_printf(dev, "%s: Exceeded Max AQ Buf size\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!v) { device_printf(dev, "%s: unable to allocate memory\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } v->vsi_id = sc->vsi_res->vsi_id; v->num_elements = cnt; /* Scan the filter array */ SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { if (f->flags & IXL_FILTER_DEL) { bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); i++; SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next); free(f, M_DEVBUF); } if (i == cnt) break; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); free(v, M_DEVBUF); /* add stats? */ } /* ** This routine takes additions to the vsi filter ** table and creates an Admin Queue call to create ** the filters in the hardware. */ void ixlv_add_ether_filters(struct ixlv_sc *sc) { struct virtchnl_ether_addr_list *a; struct ixlv_mac_filter *f; device_t dev = sc->dev; int len, j = 0, cnt = 0; /* Get count of MAC addresses to add */ SLIST_FOREACH(f, sc->mac_filters, next) { if (f->flags & IXL_FILTER_ADD) cnt++; } if (cnt == 0) { /* Should not happen... */ DDPRINTF(dev, "cnt == 0, exiting..."); ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, VIRTCHNL_STATUS_SUCCESS); return; } len = sizeof(struct virtchnl_ether_addr_list) + (cnt * sizeof(struct virtchnl_ether_addr)); a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (a == NULL) { device_printf(dev, "%s: Failed to get memory for " "virtchnl_ether_addr_list\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } a->vsi_id = sc->vsi.id; a->num_elements = cnt; /* Scan the filter array */ SLIST_FOREACH(f, sc->mac_filters, next) { if (f->flags & IXL_FILTER_ADD) { bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN); f->flags &= ~IXL_FILTER_ADD; j++; DDPRINTF(dev, "ADD: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); } if (j == cnt) break; } DDPRINTF(dev, "len %d, j %d, cnt %d", len, j, cnt); ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len); /* add stats? */ free(a, M_DEVBUF); return; } /* ** This routine takes filters flagged for deletion in the ** sc MAC filter list and creates an Admin Queue call ** to delete those filters in the hardware. 
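[Editor's note: ixlv_del_vlans and ixlv_del_ether_filters free list nodes while walking them, which is only safe with the _SAFE iteration macros that cache the successor before the loop body runs. A minimal sketch using FreeBSD's <sys/queue.h>; older glibc copies of this header may lack the _SAFE variants:]

#include <stdlib.h>
#include <sys/queue.h>

struct filter {
	int flags;
	SLIST_ENTRY(filter) next;
};
SLIST_HEAD(filter_list, filter);

#define FILTER_DEL 0x1

static void purge(struct filter_list *head)
{
	struct filter *f, *ftmp;

	/* ftmp holds the successor, so freeing f cannot break the walk */
	SLIST_FOREACH_SAFE(f, head, next, ftmp) {
		if (f->flags & FILTER_DEL) {
			SLIST_REMOVE(head, f, filter, next);
			free(f);
		}
	}
}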
*/ void ixlv_del_ether_filters(struct ixlv_sc *sc) { struct virtchnl_ether_addr_list *d; device_t dev = sc->dev; struct ixlv_mac_filter *f, *f_temp; int len, j = 0, cnt = 0; /* Get count of MAC addresses to delete */ SLIST_FOREACH(f, sc->mac_filters, next) { if (f->flags & IXL_FILTER_DEL) cnt++; } if (cnt == 0) { DDPRINTF(dev, "cnt == 0, exiting..."); ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, VIRTCHNL_STATUS_SUCCESS); return; } len = sizeof(struct virtchnl_ether_addr_list) + (cnt * sizeof(struct virtchnl_ether_addr)); d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { device_printf(dev, "%s: Failed to get memory for " "virtchnl_ether_addr_list\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } d->vsi_id = sc->vsi.id; d->num_elements = cnt; /* Scan the filter array */ SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) { if (f->flags & IXL_FILTER_DEL) { bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN); DDPRINTF(dev, "DEL: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); j++; SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next); free(f, M_DEVBUF); } if (j == cnt) break; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len); /* add stats? */ free(d, M_DEVBUF); return; } /* ** ixlv_request_reset ** Request that the PF reset this VF. No response is expected. */ void ixlv_request_reset(struct ixlv_sc *sc) { /* ** Set the reset status to "in progress" before ** the request, this avoids any possibility of ** a mistaken early detection of completion. */ wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS); ixlv_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0); } /* ** ixlv_request_stats ** Request the statistics for this VF's VSI from PF. */ void ixlv_request_stats(struct ixlv_sc *sc) { struct virtchnl_queue_select vqs; int error = 0; vqs.vsi_id = sc->vsi_res->vsi_id; /* Low priority, we don't need to error check */ error = ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, sizeof(vqs)); #ifdef IXL_DEBUG if (error) device_printf(sc->dev, "Error sending stats request to PF: %d\n", error); #endif } /* ** Updates driver's stats counters with VSI stats returned from PF. 
*/ void ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) { struct ixl_vsi *vsi = &sc->vsi; uint64_t tx_discards; tx_discards = es->tx_discards; for (int i = 0; i < vsi->num_queues; i++) tx_discards += sc->vsi.queues[i].txr.br->br_drops; /* Update ifnet stats */ IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast + es->rx_broadcast); IXL_SET_OPACKETS(vsi, es->tx_unicast + es->tx_multicast + es->tx_broadcast); IXL_SET_IBYTES(vsi, es->rx_bytes); IXL_SET_OBYTES(vsi, es->tx_bytes); IXL_SET_IMCASTS(vsi, es->rx_multicast); IXL_SET_OMCASTS(vsi, es->tx_multicast); IXL_SET_OERRORS(vsi, es->tx_errors); IXL_SET_IQDROPS(vsi, es->rx_discards); IXL_SET_OQDROPS(vsi, tx_discards); IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); IXL_SET_COLLISIONS(vsi, 0); vsi->eth_stats = *es; } void ixlv_config_rss_key(struct ixlv_sc *sc) { struct virtchnl_rss_key *rss_key_msg; int msg_len, key_length; u8 rss_seed[IXL_RSS_KEY_SIZE]; #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #else ixl_get_default_rss_key((u32 *)rss_seed); #endif /* Send the fetched key */ key_length = IXL_RSS_KEY_SIZE; msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1; rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); if (rss_key_msg == NULL) { device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n"); return; } rss_key_msg->vsi_id = sc->vsi_res->vsi_id; rss_key_msg->key_len = key_length; bcopy(rss_seed, &rss_key_msg->key[0], key_length); DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d", rss_key_msg->vsi_id, rss_key_msg->key_len); ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)rss_key_msg, msg_len); free(rss_key_msg, M_DEVBUF); } void ixlv_set_rss_hena(struct ixlv_sc *sc) { struct virtchnl_rss_hena hena; hena.hena = IXL_DEFAULT_RSS_HENA_X722; ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&hena, sizeof(hena)); } void ixlv_config_rss_lut(struct ixlv_sc *sc) { struct virtchnl_rss_lut *rss_lut_msg; int msg_len; u16 lut_length; u32 lut; int i, que_id; lut_length = IXL_RSS_VSI_LUT_SIZE; msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1; rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); if (rss_lut_msg == NULL) { device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n"); return; } rss_lut_msg->vsi_id = sc->vsi_res->vsi_id; /* Each LUT entry is a max of 1 byte, so this is easy */ rss_lut_msg->lut_entries = lut_length; /* Populate the LUT with max no. of queues in round robin fashion */ for (i = 0; i < lut_length; i++) { #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) */ que_id = rss_get_indirection_to_bucket(i); que_id = que_id % sc->vsi.num_queues; #else que_id = i % sc->vsi.num_queues; #endif lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK; rss_lut_msg->lut[i] = lut; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)rss_lut_msg, msg_len); free(rss_lut_msg, M_DEVBUF); } /* ** ixlv_vc_completion ** ** Asynchronous completion function for admin queue messages. Rather than busy ** wait, we fire off our requests and assume that no errors will be returned. ** This function handles the reply messages. 
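[Editor's note: with RSS disabled at build time, the LUT fill in ixlv_config_rss_lut reduces to a round-robin spread of queue ids across the table. As a tiny sketch; the size and mask values are illustrative stand-ins for IXL_RSS_VSI_LUT_SIZE and IXL_RSS_VSI_LUT_ENTRY_MASK:]

#include <stdint.h>

#define LUT_SIZE 64		/* stand-in for IXL_RSS_VSI_LUT_SIZE */
#define LUT_ENTRY_MASK 0x3F	/* stand-in for IXL_RSS_VSI_LUT_ENTRY_MASK */

static void fill_lut(uint8_t lut[LUT_SIZE], int num_queues)
{
	for (int i = 0; i < LUT_SIZE; i++)
		lut[i] = (uint8_t)((i % num_queues) & LUT_ENTRY_MASK);
	/* 4 queues -> 0,1,2,3,0,1,2,3,... across all 64 entries */
}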
*/ void ixlv_vc_completion(struct ixlv_sc *sc, enum virtchnl_ops v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { device_t dev = sc->dev; struct ixl_vsi *vsi = &sc->vsi; if (v_opcode == VIRTCHNL_OP_EVENT) { struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: #ifdef IXL_DEBUG device_printf(dev, "Link change: status %d, speed %d\n", vpe->event_data.link_event.link_status, vpe->event_data.link_event.link_speed); #endif sc->link_up = vpe->event_data.link_event.link_status; sc->link_speed = vpe->event_data.link_event.link_speed; ixlv_update_link_status(sc); break; case VIRTCHNL_EVENT_RESET_IMPENDING: device_printf(dev, "PF initiated reset!\n"); sc->init_state = IXLV_RESET_PENDING; mtx_unlock(&sc->mtx); ixlv_init(vsi); mtx_lock(&sc->mtx); break; default: device_printf(dev, "%s: Unknown event %d from AQ\n", __func__, vpe->event); break; } return; } /* Catch-all error response */ if (v_retval) { device_printf(dev, "%s: AQ returned error %s to our request %s!\n", __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode)); } #ifdef IXL_DEBUG if (v_opcode != VIRTCHNL_OP_GET_STATS) DDPRINTF(dev, "opcode %d", v_opcode); #endif switch (v_opcode) { case VIRTCHNL_OP_GET_STATS: ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg); break; case VIRTCHNL_OP_ADD_ETH_ADDR: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, v_retval); if (v_retval) { device_printf(dev, "WARNING: Error adding VF mac filter!\n"); device_printf(dev, "WARNING: Device may not receive traffic!\n"); } break; case VIRTCHNL_OP_DEL_ETH_ADDR: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, v_retval); break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC, v_retval); break; case VIRTCHNL_OP_ADD_VLAN: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, v_retval); break; case VIRTCHNL_OP_DEL_VLAN: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, v_retval); break; case VIRTCHNL_OP_ENABLE_QUEUES: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES, v_retval); if (v_retval == 0) { /* Update link status */ ixlv_update_link_status(sc); /* Turn on all interrupts */ ixlv_enable_intr(vsi); /* And inform the stack we're ready */ vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; /* TODO: Clear a state flag, so we know we're ready to run init again */ } break; case VIRTCHNL_OP_DISABLE_QUEUES: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES, v_retval); if (v_retval == 0) { /* Turn off all interrupts */ ixlv_disable_intr(vsi); /* Tell the stack that the interface is no longer active */ vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); } break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES, v_retval); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS, v_retval); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY, v_retval); break; case VIRTCHNL_OP_SET_RSS_HENA: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA, v_retval); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT, v_retval); break; default: #ifdef IXL_DEBUG device_printf(dev, "%s: Received unexpected message %s from PF.\n", __func__, ixl_vc_opcode_str(v_opcode)); #endif break; } return; } static void ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request) { switch 
(request) { case IXLV_FLAG_AQ_MAP_VECTORS: ixlv_map_queues(sc); break; case IXLV_FLAG_AQ_ADD_MAC_FILTER: ixlv_add_ether_filters(sc); break; case IXLV_FLAG_AQ_ADD_VLAN_FILTER: ixlv_add_vlans(sc); break; case IXLV_FLAG_AQ_DEL_MAC_FILTER: ixlv_del_ether_filters(sc); break; case IXLV_FLAG_AQ_DEL_VLAN_FILTER: ixlv_del_vlans(sc); break; case IXLV_FLAG_AQ_CONFIGURE_QUEUES: ixlv_configure_queues(sc); break; case IXLV_FLAG_AQ_DISABLE_QUEUES: ixlv_disable_queues(sc); break; case IXLV_FLAG_AQ_ENABLE_QUEUES: ixlv_enable_queues(sc); break; case IXLV_FLAG_AQ_CONFIG_RSS_KEY: ixlv_config_rss_key(sc); break; case IXLV_FLAG_AQ_SET_RSS_HENA: ixlv_set_rss_hena(sc); break; case IXLV_FLAG_AQ_CONFIG_RSS_LUT: ixlv_config_rss_lut(sc); break; } } void ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr) { mgr->sc = sc; mgr->current = NULL; TAILQ_INIT(&mgr->pending); callout_init_mtx(&mgr->callout, &sc->mtx, 0); } static void ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err) { struct ixl_vc_cmd *cmd; cmd = mgr->current; mgr->current = NULL; cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; cmd->callback(cmd, cmd->arg, err); ixl_vc_process_next(mgr); } static void ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request, enum virtchnl_status_code err) { struct ixl_vc_cmd *cmd; cmd = mgr->current; if (cmd == NULL || cmd->request != request) return; callout_stop(&mgr->callout); /* ATM, the virtchnl codes map to i40e ones directly */ ixl_vc_process_completion(mgr, (enum i40e_status_code)err); } static void ixl_vc_cmd_timeout(void *arg) { struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; IXLV_CORE_LOCK_ASSERT(mgr->sc); ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT); } static void ixl_vc_cmd_retry(void *arg) { struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; IXLV_CORE_LOCK_ASSERT(mgr->sc); ixl_vc_send_current(mgr); } static void ixl_vc_send_current(struct ixl_vc_mgr *mgr) { struct ixl_vc_cmd *cmd; cmd = mgr->current; ixl_vc_send_cmd(mgr->sc, cmd->request); callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr); } static void ixl_vc_process_next(struct ixl_vc_mgr *mgr) { struct ixl_vc_cmd *cmd; if (mgr->current != NULL) return; if (TAILQ_EMPTY(&mgr->pending)) return; cmd = TAILQ_FIRST(&mgr->pending); TAILQ_REMOVE(&mgr->pending, cmd, next); mgr->current = cmd; ixl_vc_send_current(mgr); } static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr) { callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); } void ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd, uint32_t req, ixl_vc_callback_t *callback, void *arg) { IXLV_CORE_LOCK_ASSERT(mgr->sc); if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) { if (mgr->current == cmd) mgr->current = NULL; else TAILQ_REMOVE(&mgr->pending, cmd, next); } cmd->request = req; cmd->callback = callback; cmd->arg = arg; cmd->flags |= IXLV_VC_CMD_FLAG_BUSY; TAILQ_INSERT_TAIL(&mgr->pending, cmd, next); ixl_vc_process_next(mgr); } void ixl_vc_flush(struct ixl_vc_mgr *mgr) { struct ixl_vc_cmd *cmd; IXLV_CORE_LOCK_ASSERT(mgr->sc); KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, ("ixlv: pending commands waiting but no command in progress")); cmd = mgr->current; if (cmd != NULL) { mgr->current = NULL; cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); } while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) { TAILQ_REMOVE(&mgr->pending, cmd, next); cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); } callout_stop(&mgr->callout); } Index: 
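[Editor's note: for orientation, this is how the manager is meant to be driven from the rest of the driver — enqueue under the core lock with a completion callback, and let ixl_vc_process_next/ixl_vc_send_current serialize the sends and handle retry/timeout via the callout. A usage sketch only: ixlv_cfg_done and ixlv_kick_config are hypothetical names, but the enqueue signature and callback type are the ones declared in ixlv_vc_mgr.h.]

/* hypothetical completion callback */
static void
ixlv_cfg_done(struct ixl_vc_cmd *cmd, void *arg, enum i40e_status_code code)
{
	/* e.g. kick the next init stage on I40E_SUCCESS */
}

/* hypothetical caller: the manager requires the core lock to be held */
static void
ixlv_kick_config(struct ixlv_sc *sc)
{
	mtx_lock(&sc->mtx);		/* satisfies IXLV_CORE_LOCK_ASSERT */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixlv_cfg_done, sc);
	mtx_unlock(&sc->mtx);
}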
releng/11.3/sys/dev/ixl/virtchnl.h =================================================================== --- releng/11.3/sys/dev/ixl/virtchnl.h (revision 349180) +++ releng/11.3/sys/dev/ixl/virtchnl.h (revision 349181) @@ -1,747 +1,956 @@ /****************************************************************************** - Copyright (c) 2013-2017, Intel Corporation + Copyright (c) 2013-2019, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ + #ifndef _VIRTCHNL_H_ #define _VIRTCHNL_H_ /* Description: * This header file describes the VF-PF communication protocol used * by the drivers for all devices starting from our 40G product line * * Admin queue buffer usage: * desc->opcode is always aqc_opc_send_msg_to_pf * flags, retval, datalen, and data addr are all used normally. * The Firmware copies the cookie fields when sending messages between the * PF and VF, but uses all other fields internally. Due to this limitation, * we must send all messages as "indirect", i.e. using an external buffer. * * All the VSI indexes are relative to the VF. Each VF can have maximum of * three VSIs. All the queue indexes are relative to the VSI. Each VF can * have a maximum of sixteen queues for all of its VSIs. * * The PF is required to return a status code in v_retval for all messages * except RESET_VF, which does not require any response. The return value * is of status_code type, defined in the shared type.h. * * In general, VF driver initialization should roughly follow the order of * these opcodes. The VF driver must first validate the API version of the * PF driver, then request a reset, then get resources, then configure * queues and interrupts. After these operations are complete, the VF * driver may start its queues, optionally add MAC and VLAN filters, and * process traffic. 
*/ /* START GENERIC DEFINES * Need to ensure the following enums and defines hold the same meaning and * value in current and future projects */ /* Error Codes */ enum virtchnl_status_code { VIRTCHNL_STATUS_SUCCESS = 0, - VIRTCHNL_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, - VIRTCHNL_STATUS_NOT_SUPPORTED = -64, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, }; +/* Backward compatibility */ +#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM +#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED + +#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 +#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 enum virtchnl_link_speed { VIRTCHNL_LINK_SPEED_UNKNOWN = 0, VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), + VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), + VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), }; /* for hsplit_0 field of Rx HMC context */ /* deprecated with AVF 1.0 */ enum virtchnl_rx_hsplit { VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, }; #define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6 /* END GENERIC DEFINES */ /* Opcodes for VF-PF communication. These are placed in the v_opcode field * of the virtchnl_msg structure. */ enum virtchnl_ops { /* The PF sends status change events to VFs using * the VIRTCHNL_OP_EVENT opcode. * VFs send requests to the PF using the other ops. * Use of "advanced opcode" features must be negotiated as part of capabilities * exchange and are not considered part of base mode feature set. 
*/ VIRTCHNL_OP_UNKNOWN = 0, VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ VIRTCHNL_OP_RESET_VF = 2, VIRTCHNL_OP_GET_VF_RESOURCES = 3, VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, VIRTCHNL_OP_ENABLE_QUEUES = 8, VIRTCHNL_OP_DISABLE_QUEUES = 9, VIRTCHNL_OP_ADD_ETH_ADDR = 10, VIRTCHNL_OP_DEL_ETH_ADDR = 11, VIRTCHNL_OP_ADD_VLAN = 12, VIRTCHNL_OP_DEL_VLAN = 13, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, VIRTCHNL_OP_GET_STATS = 15, VIRTCHNL_OP_RSVD = 16, VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + /* opcode 19 is reserved */ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ VIRTCHNL_OP_CONFIG_RSS_KEY = 23, VIRTCHNL_OP_CONFIG_RSS_LUT = 24, VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, VIRTCHNL_OP_SET_RSS_HENA = 26, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, VIRTCHNL_OP_REQUEST_QUEUES = 29, - + VIRTCHNL_OP_ENABLE_CHANNELS = 30, + VIRTCHNL_OP_DISABLE_CHANNELS = 31, + VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, + VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, }; -/* This macro is used to generate a compilation error if a structure +/* These macros are used to generate compilation errors if a structure/union * is not exactly the correct length. It gives a divide by zero error if the - * structure is not of the correct size, otherwise it creates an enum that is - * never used. + * structure/union is not of the correct size, otherwise it creates an enum + * that is never used. */ #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ - {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)} + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } +#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } /* Virtual channel message descriptor. This overlays the admin queue * descriptor. All other data is passed in external buffers. */ struct virtchnl_msg { u8 pad[8]; /* AQ flags/opcode/len/retval fields */ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ enum virtchnl_status_code v_retval; /* ditto for desc->retval */ u32 vfid; /* used by PF when sending to VF */ }; VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); -/* Message descriptions and data structures.*/ +/* Message descriptions and data structures. */ /* VIRTCHNL_OP_VERSION * VF posts its version number to the PF. PF responds with its version number * in the same format, along with a return code. * Reply from PF has its major/minor versions also in param0 and param1. * If there is a major version mismatch, then the VF cannot operate. * If there is a minor version mismatch, then the VF can operate but should * add a warning to the system log. * * This enum element MUST always be specified as == 1, regardless of other * changes in the API. The PF must always respond to this message without * error regardless of version mismatch. 
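[Editor's note: VIRTCHNL_CHECK_STRUCT_LEN relies on division by zero in a constant expression to break the build whenever a structure size drifts, a trick that works even in C89. A sketch of the mechanism next to its C11 equivalent:]

#include <stdint.h>

struct example { uint32_t a; uint16_t b; uint16_t c; };

/* Classic trick: (n)/0 is a compile-time error when sizeof mismatches. */
#define CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \
	{ static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

CHECK_STRUCT_LEN(8, example);

/* C11 equivalent, with a readable diagnostic: */
_Static_assert(sizeof(struct example) == 8, "example must be 8 bytes");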
*/ #define VIRTCHNL_VERSION_MAJOR 1 #define VIRTCHNL_VERSION_MINOR 1 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 struct virtchnl_version_info { u32 major; u32 minor; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) /* VIRTCHNL_OP_RESET_VF * VF sends this request to PF with no parameters * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register * until reset completion is indicated. The admin queue must be reinitialized * after this operation. * * When reset is complete, PF must ensure that all queues in all VSIs associated * with the VF are stopped, all queue configurations in the HMC are set to 0, * and all MAC and VLAN filters (except the default MAC address) on all VSIs * are cleared. */ /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV * vsi_type should always be 6 for backward compatibility. Add other fields * as needed. */ enum virtchnl_vsi_type { VIRTCHNL_VSI_TYPE_INVALID = 0, VIRTCHNL_VSI_SRIOV = 6, }; /* VIRTCHNL_OP_GET_VF_RESOURCES * Version 1.0 VF sends this request to PF with no parameters * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing * virtchnl_vf_resource and one or more * virtchnl_vsi_resource structures. */ struct virtchnl_vsi_resource { u16 vsi_id; u16 num_queue_pairs; enum virtchnl_vsi_type vsi_type; u16 qset_handle; u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; }; VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); /* VF capability flags * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including * TX/RX Checksum offloading and TSO for non-tunnelled packets. */ #define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 #define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 #define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 +#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080 #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 #define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 #define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 - +#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 +/* Define below the capability flags that are not offloads */ +#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ VIRTCHNL_VF_OFFLOAD_RSS_PF) struct virtchnl_vf_resource { u16 num_vsis; u16 num_queue_pairs; u16 max_vectors; u16 max_mtu; u32 vf_cap_flags; u32 rss_key_size; u32 rss_lut_size; struct virtchnl_vsi_resource vsi_res[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); /* VIRTCHNL_OP_CONFIG_TX_QUEUE * VF sends this message to set up parameters for one TX queue. * External data buffer contains one instance of virtchnl_txq_info. * PF configures requested queue and returns a status code. 
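[Editor's note: the version policy stated above — a major mismatch is fatal, a newer PF minor is tolerated with a warning — is the comparison ixlv_verify_api_ver performs. As a compact sketch:]

#include <stdio.h>

#define VERSION_MAJOR 1
#define VERSION_MINOR 1

/* Returns 0 if the VF can run against the reported PF version. */
static int check_api_version(unsigned int pf_major, unsigned int pf_minor)
{
	if (pf_major > VERSION_MAJOR ||
	    (pf_major == VERSION_MAJOR && pf_minor > VERSION_MINOR)) {
		fprintf(stderr, "critical PF/VF API version mismatch\n");
		return (-1);
	}
	return (0);
}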
*/ /* Tx queue config info */ struct virtchnl_txq_info { u16 vsi_id; u16 queue_id; u16 ring_len; /* number of descriptors, multiple of 8 */ u16 headwb_enabled; /* deprecated with AVF 1.0 */ u64 dma_ring_addr; u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ }; VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); /* VIRTCHNL_OP_CONFIG_RX_QUEUE * VF sends this message to set up parameters for one RX queue. * External data buffer contains one instance of virtchnl_rxq_info. - * PF configures requested queue and returns a status code. + * PF configures requested queue and returns a status code. The + * crc_disable flag disables CRC stripping on the VF. Setting + * the crc_disable flag to 1 will disable CRC stripping for each + * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC + * offload must have been set prior to sending this info or the PF + * will ignore the request. This flag should be set the same for + * all of the queues for a VF. */ /* Rx queue config info */ struct virtchnl_rxq_info { u16 vsi_id; u16 queue_id; u32 ring_len; /* number of descriptors, multiple of 32 */ u16 hdr_size; u16 splithdr_enabled; /* deprecated with AVF 1.0 */ u32 databuffer_size; u32 max_pkt_size; - u32 pad1; + u8 crc_disable; + u8 pad1[3]; u64 dma_ring_addr; enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ u32 pad2; }; VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); /* VIRTCHNL_OP_CONFIG_VSI_QUEUES - * VF sends this message to set parameters for all active TX and RX queues + * VF sends this message to set parameters for active TX and RX queues * associated with the specified VSI. * PF configures queues and returns status. * If the number of queues specified is greater than the number of queues * associated with the VSI, an error is returned and no queues are configured. + * NOTE: The VF is not required to configure all queues in a single request. + * It may send multiple messages. PF drivers must correctly handle all VF + * requests. */ struct virtchnl_queue_pair_info { /* NOTE: vsi_id and queue_id should be identical for both queues. */ struct virtchnl_txq_info txq; struct virtchnl_rxq_info rxq; }; VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); struct virtchnl_vsi_queue_config_info { u16 vsi_id; u16 num_queue_pairs; u32 pad; struct virtchnl_queue_pair_info qpair[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); /* VIRTCHNL_OP_REQUEST_QUEUES * VF sends this message to request the PF to allocate additional queues to * this VF. Each VF gets a guaranteed number of queues on init but asking for * additional queues must be negotiated. This is a best effort request as it * is possible the PF does not have enough queues left to support the request. * If the PF cannot support the number requested it will respond with the * maximum number it is able to support; otherwise it will respond with the * number requested. */ /* VF resource request */ struct virtchnl_vf_res_request { u16 num_queue_pairs; }; /* VIRTCHNL_OP_CONFIG_IRQ_MAP * VF uses this message to map vectors to queues. * The rxq_map and txq_map fields are bitmaps used to indicate which queues * are to be associated with the specified vector. - * The "other" causes are always mapped to vector 0. + * The "other" causes are always mapped to vector 0. The VF may not request + * that vector 0 be used for traffic. * PF configures interrupt mapping and returns status. 
+ * NOTE: due to hardware requirements, all active queues (both TX and RX) + * should be mapped to interrupts, even if the driver intends to operate + * only in polling mode. In this case the interrupt may be disabled, but + * the ITR timer will still run to trigger writebacks. */ struct virtchnl_vector_map { u16 vsi_id; u16 vector_id; u16 rxq_map; u16 txq_map; u16 rxitr_idx; u16 txitr_idx; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); struct virtchnl_irq_map_info { u16 num_vectors; struct virtchnl_vector_map vecmap[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); /* VIRTCHNL_OP_ENABLE_QUEUES * VIRTCHNL_OP_DISABLE_QUEUES * VF sends these message to enable or disable TX/RX queue pairs. * The queues fields are bitmaps indicating which queues to act upon. * (Currently, we only support 16 queues per VF, but we make the field * u32 to allow for expansion.) * PF performs requested action and returns status. + * NOTE: The VF is not required to enable/disable all queues in a single + * request. It may send multiple messages. + * PF drivers must correctly handle all VF requests. */ struct virtchnl_queue_select { u16 vsi_id; u16 pad; u32 rx_queues; u32 tx_queues; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); /* VIRTCHNL_OP_ADD_ETH_ADDR * VF sends this message in order to add one or more unicast or multicast * address filters for the specified VSI. * PF adds the filters and returns status. */ /* VIRTCHNL_OP_DEL_ETH_ADDR * VF sends this message in order to remove one or more unicast or multicast * filters for the specified VSI. * PF removes the filters and returns status. */ struct virtchnl_ether_addr { u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; u8 pad[2]; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); struct virtchnl_ether_addr_list { u16 vsi_id; u16 num_elements; struct virtchnl_ether_addr list[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); /* VIRTCHNL_OP_ADD_VLAN * VF sends this message to add one or more VLAN tag filters for receives. * PF adds the filters and returns status. * If a port VLAN is configured by the PF, this operation will return an * error to the VF. */ /* VIRTCHNL_OP_DEL_VLAN * VF sends this message to remove one or more VLAN tag filters for receives. * PF removes the filters and returns status. * If a port VLAN is configured by the PF, this operation will return an * error to the VF. */ struct virtchnl_vlan_filter_list { u16 vsi_id; u16 num_elements; u16 vlan_id[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE * VF sends VSI id and flags. * PF returns status code in retval. * Note: we assume that broadcast accept mode is always enabled. */ struct virtchnl_promisc_info { u16 vsi_id; u16 flags; }; VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); #define FLAG_VF_UNICAST_PROMISC 0x00000001 #define FLAG_VF_MULTICAST_PROMISC 0x00000002 /* VIRTCHNL_OP_GET_STATS * VF sends this message to request stats for the selected VSI. VF uses * the virtchnl_queue_select struct to specify the VSI. The queue_id * field is ignored by the PF. * - * PF replies with struct eth_stats in an external buffer. + * PF replies with struct virtchnl_eth_stats in an external buffer. 
*/ +struct virtchnl_eth_stats { + u64 rx_bytes; /* received bytes */ + u64 rx_unicast; /* received unicast pkts */ + u64 rx_multicast; /* received multicast pkts */ + u64 rx_broadcast; /* received broadcast pkts */ + u64 rx_discards; + u64 rx_unknown_protocol; + u64 tx_bytes; /* transmitted bytes */ + u64 tx_unicast; /* transmitted unicast pkts */ + u64 tx_multicast; /* transmitted multicast pkts */ + u64 tx_broadcast; /* transmitted broadcast pkts */ + u64 tx_discards; + u64 tx_errors; +}; + /* VIRTCHNL_OP_CONFIG_RSS_KEY * VIRTCHNL_OP_CONFIG_RSS_LUT * VF sends these messages to configure RSS. Only supported if both PF * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during * configuration negotiation. If this is the case, then the RSS fields in * the VF resource struct are valid. * Both the key and LUT are initialized to 0 by the PF, meaning that * RSS is effectively disabled until set up by the VF. */ struct virtchnl_rss_key { u16 vsi_id; u16 key_len; u8 key[1]; /* RSS hash key, packed bytes */ }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; u8 lut[1]; /* RSS lookup table */ }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); /* VIRTCHNL_OP_GET_RSS_HENA_CAPS * VIRTCHNL_OP_SET_RSS_HENA * VF sends these messages to get and set the hash filter enable bits for RSS. * By default, the PF sets these to all possible traffic types that the * hardware supports. The VF can query this value if it wants to change the * traffic types that are hashed by the hardware. */ struct virtchnl_rss_hena { u64 hena; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); +/* VIRTCHNL_OP_ENABLE_CHANNELS + * VIRTCHNL_OP_DISABLE_CHANNELS + * VF sends these messages to enable or disable channels based on + * the user specified queue count and queue offset for each traffic class. + * This struct encompasses all the information that the PF needs from + * VF to create a channel. + */ +struct virtchnl_channel_info { + u16 count; /* number of queues in a channel */ + u16 offset; /* queues in a channel start from 'offset' */ + u32 pad; + u64 max_tx_rate; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info); + +struct virtchnl_tc_info { + u32 num_tc; + u32 pad; + struct virtchnl_channel_info list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info); + +/* VIRTCHNL_ADD_CLOUD_FILTER + * VIRTCHNL_DEL_CLOUD_FILTER + * VF sends these messages to add or delete a cloud filter based on the + * user specified match and action filters. These structures encompass + * all the information that the PF needs from the VF to add/delete a + * cloud filter. 
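[Editor's note: because virtchnl_rss_key and virtchnl_rss_lut end in one-byte [1] arrays, the wire length of a message carrying n payload bytes is sizeof(struct) + n - 1 — the arithmetic ixlv_config_rss_key uses earlier in this patch. A sketch with a simplified stand-in struct:]

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct rss_key {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t key[1];			/* key_len packed bytes follow */
};

static struct rss_key *make_rss_key_msg(const uint8_t *seed,
    uint16_t key_len, size_t *msg_len)
{
	/* the [1] already accounts for one byte, hence the "- 1" */
	*msg_len = sizeof(struct rss_key) + key_len - 1;
	struct rss_key *m = calloc(1, *msg_len);

	if (m == NULL)
		return (NULL);
	m->key_len = key_len;
	memcpy(m->key, seed, key_len);
	return (m);			/* caller sends *msg_len bytes, then frees */
}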
+ */
+
+struct virtchnl_l4_spec {
+	u8 src_mac[ETH_ALEN];
+	u8 dst_mac[ETH_ALEN];
+	__be16 vlan_id;
+	__be16 pad; /* reserved for future use */
+	__be32 src_ip[4];
+	__be32 dst_ip[4];
+	__be16 src_port;
+	__be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+	struct virtchnl_l4_spec tcp_spec;
+	u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+	/* action types */
+	VIRTCHNL_ACTION_DROP = 0,
+	VIRTCHNL_ACTION_TC_REDIRECT,
+};
+
+enum virtchnl_flow_type {
+	/* flow types */
+	VIRTCHNL_TCP_V4_FLOW = 0,
+	VIRTCHNL_TCP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+	union virtchnl_flow_spec data;
+	union virtchnl_flow_spec mask;
+	enum virtchnl_flow_type flow_type;
+	enum virtchnl_action action;
+	u32 action_meta;
+	u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
 /* VIRTCHNL_OP_EVENT
  * PF sends this message to inform the VF driver of events that may affect it.
  * No direct response is expected from the VF, though it may generate other
  * messages in response to this one.
  */
 enum virtchnl_event_codes {
 	VIRTCHNL_EVENT_UNKNOWN = 0,
 	VIRTCHNL_EVENT_LINK_CHANGE,
 	VIRTCHNL_EVENT_RESET_IMPENDING,
 	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
 };

 #define PF_EVENT_SEVERITY_INFO			0
 #define PF_EVENT_SEVERITY_ATTENTION		1
 #define PF_EVENT_SEVERITY_ACTION_REQUIRED	2
 #define PF_EVENT_SEVERITY_CERTAIN_DOOM		255

 struct virtchnl_pf_event {
 	enum virtchnl_event_codes event;
 	union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities then use link_event; otherwise use
+		 * link_event_adv to get the speed and link information. The
+		 * ability to understand new speeds is indicated by setting
+		 * the capability flag VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the
+		 * vf_cap_flags parameter of the virtchnl_vf_resource struct,
+		 * which can be used to determine which link event struct to
+		 * use below.
+		 */
 		struct {
 			enum virtchnl_link_speed link_speed;
-			bool link_status;
+			u8 link_status;
 		} link_event;
+		struct {
+			/* link_speed provided in Mbps */
+			u32 link_speed;
+			u8 link_status;
+		} link_event_adv;
 	} event_data;

 	int severity;
 };

 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);

 /* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
  * VF uses this message to request PF to map IWARP vectors to IWARP queues.
  * The request for this originates from the VF IWARP driver through
  * a client interface between VF LAN and VF IWARP driver.
  * A vector could have an AEQ and CEQ attached to it, although
  * there is a single AEQ per VF IWARP instance, in which case
  * most vectors will have an INVALID_IDX for aeq and a valid idx for ceq.
  * There will never be a case where there will be multiple CEQs attached
  * to a single vector.
  * PF configures interrupt mapping and returns status.
  */
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define QUEUE_TYPE_PE_AEQ	0x80
-#define QUEUE_INVALID_IDX	0xFFFF
-
 struct virtchnl_iwarp_qv_info {
 	u32 v_idx; /* msix_vector */
 	u16 ceq_idx;
 	u16 aeq_idx;
 	u8 itr_idx;
 };

 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);

 struct virtchnl_iwarp_qvlist_info {
 	u32 num_vectors;
 	struct virtchnl_iwarp_qv_info qv_info[1];
 };

 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);

+/* Since VF messages are limited by u16 size, precalculate the maximum
+ * possible values of nested elements in virtchnl structures that the
+ * virtual channel can handle in a single message.
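+ *
+ * Editor's worked examples (assuming the struct sizes checked above): a
+ * VLAN filter message can carry at most (65535 - 6) / 2 = 32764 VLAN ids,
+ * and an ethernet address message at most (65535 - 12) / 8 = 8190 entries;
+ * the enum below derives these limits from sizeof() at compile time.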
+ */
+enum virtchnl_vector_limits {
+	VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+		sizeof(struct virtchnl_queue_pair_info),
+
+	VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+		sizeof(struct virtchnl_vector_map),
+
+	VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+		sizeof(struct virtchnl_ether_addr),
+
+	VIRTCHNL_OP_ADD_DEL_VLAN_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+		sizeof(u16),
+
+	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_iwarp_qvlist_info)) /
+		sizeof(struct virtchnl_iwarp_qv_info),
+
+	VIRTCHNL_OP_ENABLE_CHANNELS_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+		sizeof(struct virtchnl_channel_info),
+};
+
 /* VF reset states - these are written into the RSTAT register:
  * VFGEN_RSTAT on the VF
  * When the PF initiates a reset, it writes 0
  * When the reset is complete, it writes 1
  * When the PF detects that the VF has recovered, it writes 2
  * VF checks this register periodically to determine if a reset has occurred,
  * then polls it to know when the reset is complete.
  * If either the PF or VF reads the register while the hardware
  * is in a reset state, it will return DEADBEEF, which, when masked,
  * will result in 3.
  */
 enum virtchnl_vfr_states {
 	VIRTCHNL_VFR_INPROGRESS = 0,
 	VIRTCHNL_VFR_COMPLETED,
 	VIRTCHNL_VFR_VFACTIVE,
 };

 /**
  * virtchnl_vc_validate_vf_msg
  * @ver: Virtchnl version info
  * @v_opcode: Opcode for the message
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  *
  * validate msg format against struct for each opcode
  */
 static inline int
 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 			    u8 *msg, u16 msglen)
 {
 	bool err_msg_format = FALSE;
 	int valid_len = 0;

 	/* Validate message length.
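 	 * Editor's illustrative note: valid_len is the exact length implied
 	 * by the opcode and, for variable-length messages, by the element
 	 * count carried in the message header; e.g. an ADD_ETH_ADDR message
 	 * carrying three addresses must have msglen == 12 + 3 * 8 == 36.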
 	 */
 	switch (v_opcode) {
 	case VIRTCHNL_OP_VERSION:
 		valid_len = sizeof(struct virtchnl_version_info);
 		break;
 	case VIRTCHNL_OP_RESET_VF:
 		break;
 	case VIRTCHNL_OP_GET_VF_RESOURCES:
 		if (VF_IS_V11(ver))
 			valid_len = sizeof(u32);
 		break;
 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
 		valid_len = sizeof(struct virtchnl_txq_info);
 		break;
 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
 		valid_len = sizeof(struct virtchnl_rxq_info);
 		break;
 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
 		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
 		if (msglen >= valid_len) {
 			struct virtchnl_vsi_queue_config_info *vqc =
 			    (struct virtchnl_vsi_queue_config_info *)msg;
+
+			if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
+			    VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
+				err_msg_format = TRUE;
+				break;
+			}
+
 			valid_len += (vqc->num_queue_pairs *
 				      sizeof(struct virtchnl_queue_pair_info));
-			if (vqc->num_queue_pairs == 0)
-				err_msg_format = TRUE;
 		}
 		break;
 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
 		valid_len = sizeof(struct virtchnl_irq_map_info);
 		if (msglen >= valid_len) {
 			struct virtchnl_irq_map_info *vimi =
 			    (struct virtchnl_irq_map_info *)msg;
+
+			if (vimi->num_vectors == 0 || vimi->num_vectors >
+			    VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
+				err_msg_format = TRUE;
+				break;
+			}
+
 			valid_len += (vimi->num_vectors *
 				      sizeof(struct virtchnl_vector_map));
-			if (vimi->num_vectors == 0)
-				err_msg_format = TRUE;
 		}
 		break;
 	case VIRTCHNL_OP_ENABLE_QUEUES:
 	case VIRTCHNL_OP_DISABLE_QUEUES:
 		valid_len = sizeof(struct virtchnl_queue_select);
 		break;
 	case VIRTCHNL_OP_ADD_ETH_ADDR:
 	case VIRTCHNL_OP_DEL_ETH_ADDR:
 		valid_len = sizeof(struct virtchnl_ether_addr_list);
 		if (msglen >= valid_len) {
 			struct virtchnl_ether_addr_list *veal =
 			    (struct virtchnl_ether_addr_list *)msg;
+
+			if (veal->num_elements == 0 || veal->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
+				err_msg_format = TRUE;
+				break;
+			}
+
 			valid_len += veal->num_elements *
 			    sizeof(struct virtchnl_ether_addr);
-			if (veal->num_elements == 0)
-				err_msg_format = TRUE;
 		}
 		break;
 	case VIRTCHNL_OP_ADD_VLAN:
 	case VIRTCHNL_OP_DEL_VLAN:
 		valid_len = sizeof(struct virtchnl_vlan_filter_list);
 		if (msglen >= valid_len) {
 			struct virtchnl_vlan_filter_list *vfl =
 			    (struct virtchnl_vlan_filter_list *)msg;
-			valid_len += vfl->num_elements * sizeof(u16);
-			if (vfl->num_elements == 0)
+
+			if (vfl->num_elements == 0 || vfl->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
 				err_msg_format = TRUE;
+				break;
+			}
+
+			valid_len += vfl->num_elements * sizeof(u16);
 		}
 		break;
 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
 		valid_len = sizeof(struct virtchnl_promisc_info);
 		break;
 	case VIRTCHNL_OP_GET_STATS:
 		valid_len = sizeof(struct virtchnl_queue_select);
 		break;
 	case VIRTCHNL_OP_IWARP:
 		/* These messages are opaque to us and will be validated in
 		 * the RDMA client code. We just need to check for nonzero
 		 * length. The firmware will enforce max length restrictions.
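 		 * Editor's illustrative note for the variable-length cases
 		 * below: each struct declares one trailing element, so the
 		 * extra space is (n - 1) * sizeof(element); e.g. an IWARP
 		 * map with 4 vectors must have msglen == 16 + 3 * 12 == 52,
 		 * and an enable-channels message with 2 traffic classes
 		 * must have msglen == 24 + 1 * 16 == 40.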
 		 */
 		if (msglen)
 			valid_len = msglen;
 		else
 			err_msg_format = TRUE;
 		break;
 	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
 		break;
 	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
 		valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
 		if (msglen >= valid_len) {
 			struct virtchnl_iwarp_qvlist_info *qv =
 			    (struct virtchnl_iwarp_qvlist_info *)msg;
-			if (qv->num_vectors == 0) {
+
+			if (qv->num_vectors == 0 || qv->num_vectors >
+			    VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX) {
 				err_msg_format = TRUE;
 				break;
 			}
+
 			valid_len += ((qv->num_vectors - 1) *
 				      sizeof(struct virtchnl_iwarp_qv_info));
 		}
 		break;
 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
 		valid_len = sizeof(struct virtchnl_rss_key);
 		if (msglen >= valid_len) {
 			struct virtchnl_rss_key *vrk =
 			    (struct virtchnl_rss_key *)msg;
 			valid_len += vrk->key_len - 1;
 		}
 		break;
 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
 		valid_len = sizeof(struct virtchnl_rss_lut);
 		if (msglen >= valid_len) {
 			struct virtchnl_rss_lut *vrl =
 			    (struct virtchnl_rss_lut *)msg;
 			valid_len += vrl->lut_entries - 1;
 		}
 		break;
 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
 		break;
 	case VIRTCHNL_OP_SET_RSS_HENA:
 		valid_len = sizeof(struct virtchnl_rss_hena);
 		break;
 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
 		break;
 	case VIRTCHNL_OP_REQUEST_QUEUES:
 		valid_len = sizeof(struct virtchnl_vf_res_request);
 		break;
+	case VIRTCHNL_OP_ENABLE_CHANNELS:
+		valid_len = sizeof(struct virtchnl_tc_info);
+		if (msglen >= valid_len) {
+			struct virtchnl_tc_info *vti =
+			    (struct virtchnl_tc_info *)msg;
+
+			if (vti->num_tc == 0 || vti->num_tc >
+			    VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
+				err_msg_format = TRUE;
+				break;
+			}
+
+			valid_len += (vti->num_tc - 1) *
+				     sizeof(struct virtchnl_channel_info);
+		}
+		break;
+	case VIRTCHNL_OP_DISABLE_CHANNELS:
+		break;
+	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+		/* fall through */
+	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+		valid_len = sizeof(struct virtchnl_filter);
+		break;
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
 	default:
-		return VIRTCHNL_ERR_PARAM;
+		return VIRTCHNL_STATUS_ERR_PARAM;
 	}
 	/* a few more checks */
 	if (err_msg_format || valid_len != msglen)
 		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;

 	return 0;
 }
 #endif /* _VIRTCHNL_H_ */
Index: releng/11.3
===================================================================
--- releng/11.3	(revision 349180)
+++ releng/11.3	(revision 349181)

Property changes on: releng/11.3
___________________________________________________________________
Added: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /stable/11:r349163