diff --git a/sys/dev/qlnx/qlnxe/bcm_osal.h b/sys/dev/qlnx/qlnxe/bcm_osal.h index 5d940d3272d6..c820532c9e0a 100644 --- a/sys/dev/qlnx/qlnxe/bcm_osal.h +++ b/sys/dev/qlnx/qlnxe/bcm_osal.h @@ -1,542 +1,542 @@ /* * Copyright (c) 2017-2018 Cavium, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __BCM_OSAL_ECORE_PACKAGE #define __BCM_OSAL_ECORE_PACKAGE #include "qlnx_os.h" #include "ecore_status.h" #include #include #include #define OSAL_NUM_CPUS() mp_ncpus /* * prototypes of freebsd specific functions required by ecore */ extern uint32_t qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id); extern uint32_t qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value); extern uint32_t qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, uint16_t *reg_value); extern uint32_t qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, uint32_t *reg_value); extern void qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value); extern void qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, uint16_t reg_value); extern void qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, uint32_t reg_value); extern int qlnx_pci_find_capability(void *ecore_dev, int cap); extern int qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap); extern uint32_t qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr); extern void qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value); extern void qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value); extern uint32_t qlnx_reg_rd32(void *p_hwfn, uint32_t reg_addr); extern void qlnx_reg_wr32(void *p_hwfn, uint32_t reg_addr, uint32_t value); extern void qlnx_reg_wr16(void *p_hwfn, uint32_t reg_addr, uint16_t value); extern void qlnx_dbell_wr32(void *p_hwfn, uint32_t reg_addr, uint32_t value); extern void qlnx_dbell_wr32_db(void *p_hwfn, void *reg_addr, uint32_t value); extern void *qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size); extern void qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, uint32_t size); extern void qlnx_link_update(void *p_hwfn); -extern void qlnx_barrier(void *p_hwfn); +extern void qlnx_barrier(void *p_dev); extern void *qlnx_zalloc(uint32_t size); extern void 
qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats); extern void qlnx_sp_isr(void *arg); extern void qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, void *p_sw_info); extern void qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id); extern int qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params); extern int qlnx_iov_update_vport(void *p_hwfn, uint8_t vfid, void *params, uint16_t *tlvs); extern int qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id); extern void qlnx_vf_flr_update(void *p_hwfn); #define nothing do {} while(0) /* Memory Types */ #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define u64 uint64_t #define s16 uint16_t #define s32 uint32_t #ifndef QLNX_RDMA static __inline unsigned long roundup_pow_of_two(unsigned long x) { return (1UL << flsl(x - 1)); } static __inline int is_power_of_2(unsigned long n) { return (n == roundup_pow_of_two(n)); } static __inline unsigned long rounddown_pow_of_two(unsigned long x) { return (1UL << (flsl(x) - 1)); } #define max_t(type, val1, val2) \ ((type)(val1) > (type)(val2) ? (type)(val1) : (val2)) #define min_t(type, val1, val2) \ ((type)(val1) < (type)(val2) ? (type)(val1) : (val2)) #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) #define BUILD_BUG_ON(cond) nothing #endif /* #ifndef QLNX_RDMA */ #define OSAL_UNUSED #define OSAL_CPU_TO_BE64(val) htobe64(val) #define OSAL_BE64_TO_CPU(val) be64toh(val) #define OSAL_CPU_TO_BE32(val) htobe32(val) #define OSAL_BE32_TO_CPU(val) be32toh(val) #define OSAL_CPU_TO_LE32(val) htole32(val) #define OSAL_LE32_TO_CPU(val) le32toh(val) #define OSAL_CPU_TO_BE16(val) htobe16(val) #define OSAL_BE16_TO_CPU(val) be16toh(val) #define OSAL_CPU_TO_LE16(val) htole16(val) #define OSAL_LE16_TO_CPU(val) le16toh(val) static __inline uint32_t qlnx_get_cache_line_size(void) { return (CACHE_LINE_SIZE); } #define OSAL_CACHE_LINE_SIZE qlnx_get_cache_line_size() #define OSAL_BE32 uint32_t #define dma_addr_t bus_addr_t #define osal_size_t size_t typedef struct mtx osal_spinlock_t; typedef struct mtx osal_mutex_t; typedef void * osal_dpc_t; typedef struct _osal_list_entry_t { struct _osal_list_entry_t *next, *prev; } osal_list_entry_t; typedef struct osal_list_t { osal_list_entry_t *head, *tail; unsigned long cnt; } osal_list_t; /* OSAL functions */ #define OSAL_UDELAY(time) DELAY(time) #define OSAL_MSLEEP(time) qlnx_mdelay(__func__, time) #define OSAL_ALLOC(dev, GFP, size) qlnx_zalloc(size) #define OSAL_ZALLOC(dev, GFP, size) qlnx_zalloc(size) #define OSAL_VALLOC(dev, size) qlnx_zalloc(size) #define OSAL_VZALLOC(dev, size) qlnx_zalloc(size) #define OSAL_FREE(dev, memory) free(memory, M_QLNXBUF) #define OSAL_VFREE(dev, memory) free(memory, M_QLNXBUF) #define OSAL_MEM_ZERO(mem, size) bzero(mem, size) #define OSAL_MEMCPY(dst, src, size) memcpy(dst, src, size) #define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \ qlnx_dma_alloc_coherent(dev, phys, size) #define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) \ qlnx_dma_free_coherent(dev, virt, phys, size) #define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0) #define REG_WR(hwfn, addr, val) qlnx_reg_wr32(hwfn, addr, val) #define REG_WR16(hwfn, addr, val) qlnx_reg_wr16(hwfn, addr, val) #define DIRECT_REG_WR(p_hwfn, addr, value) qlnx_direct_reg_wr32(p_hwfn, addr, value) #define DIRECT_REG_WR64(p_hwfn, addr, value) \ qlnx_direct_reg_wr64(p_hwfn, addr, value) #define DIRECT_REG_WR_DB(p_hwfn, addr, value) qlnx_dbell_wr32_db(p_hwfn, addr, value) #define DIRECT_REG_RD(p_hwfn, addr) qlnx_direct_reg_rd32(p_hwfn, addr) 
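
A note on the spin-lock macros in the hunk below: a FreeBSD mutex initialized with MTX_SPIN must be acquired and released with mtx_lock_spin()/mtx_unlock_spin(); calling plain mtx_lock() on a spin mutex trips a KASSERT on INVARIANTS kernels, which is what the OSAL_SPIN_LOCK/OSAL_SPIN_UNLOCK changes correct. The hunk also gives these locks their own type string ("OSAL spin lock") rather than reusing MTX_NETWORK_LOCK, so WITNESS treats them as a lock class separate from the MTX_DEF mutexes created by OSAL_MUTEX_INIT(). A minimal sketch of the corrected pairing, with a hypothetical lock name (not part of the driver):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx osal_spin_demo;		/* hypothetical example lock */

static void
osal_spin_demo_init(void)
{
	/*
	 * Spin mutexes get their own type string so WITNESS treats them as
	 * a lock class distinct from the MTX_DEF mutexes that
	 * OSAL_MUTEX_INIT() creates.
	 */
	mtx_init(&osal_spin_demo, "osal_spin_demo", "OSAL spin lock", MTX_SPIN);
}

static void
osal_spin_demo_use(void)
{
	/* A spin mutex must be taken and released with the _spin variants. */
	mtx_lock_spin(&osal_spin_demo);
	/* ... short critical section: no sleeping, no M_WAITOK allocations ... */
	mtx_unlock_spin(&osal_spin_demo);
}
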
#define REG_RD(hwfn, addr) qlnx_reg_rd32(hwfn, addr) #define DOORBELL(hwfn, addr, value) \ qlnx_dbell_wr32(hwfn, addr, value) #define OSAL_SPIN_LOCK_ALLOC(p_hwfn, mutex) #define OSAL_SPIN_LOCK_DEALLOC(mutex) mtx_destroy(mutex) #define OSAL_SPIN_LOCK_INIT(lock) {\ - mtx_init(lock, __func__, MTX_NETWORK_LOCK, MTX_SPIN); \ + mtx_init(lock, __func__, "OSAL spin lock", MTX_SPIN); \ } #define OSAL_SPIN_UNLOCK(lock) {\ - mtx_unlock(lock); \ + mtx_unlock_spin(lock); \ } #define OSAL_SPIN_LOCK(lock) {\ - mtx_lock(lock); \ + mtx_lock_spin(lock); \ } #define OSAL_MUTEX_ALLOC(p_hwfn, mutex) #define OSAL_MUTEX_DEALLOC(mutex) mtx_destroy(mutex) #define OSAL_MUTEX_INIT(lock) {\ mtx_init(lock, __func__, MTX_NETWORK_LOCK, MTX_DEF);\ } #define OSAL_MUTEX_ACQUIRE(lock) mtx_lock(lock) #define OSAL_MUTEX_RELEASE(lock) mtx_unlock(lock) #define OSAL_DPC_ALLOC(hwfn) malloc(PAGE_SIZE, M_QLNXBUF, M_NOWAIT) #define OSAL_DPC_INIT(dpc, hwfn) nothing extern void qlnx_schedule_recovery(void *p_hwfn); #define OSAL_SCHEDULE_RECOVERY_HANDLER(x) do {qlnx_schedule_recovery(x);} while(0) #define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing #define OSAL_DPC_SYNC(hwfn) nothing static inline void OSAL_DCBX_AEN(void *p_hwfn, u32 mib_type) { return; } static inline bool OSAL_NVM_IS_ACCESS_ENABLED(void *p_hwfn) { return 1; } #define OSAL_LIST_INIT(list) \ do { \ (list)->head = NULL; \ (list)->tail = NULL; \ (list)->cnt = 0; \ } while (0) #define OSAL_LIST_INSERT_ENTRY_AFTER(entry, entry_prev, list) \ do { \ (entry)->prev = (entry_prev); \ (entry)->next = (entry_prev)->next; \ (entry)->next->prev = (entry); \ (entry_prev)->next = (entry); \ (list)->cnt++; \ } while (0); #define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) \ do { \ ((new_list)->tail)->next = ((list)->head); \ ((list)->head)->prev = ((new_list)->tail); \ (list)->head = (new_list)->head; \ (list)->cnt = (list)->cnt + (new_list)->cnt; \ OSAL_LIST_INIT(new_list); \ } while (0); #define OSAL_LIST_PUSH_HEAD(entry, list) \ do { \ (entry)->prev = (osal_list_entry_t *)0; \ (entry)->next = (list)->head; \ if ((list)->tail == (osal_list_entry_t *)0) { \ (list)->tail = (entry); \ } else { \ (list)->head->prev = (entry); \ } \ (list)->head = (entry); \ (list)->cnt++; \ } while (0) #define OSAL_LIST_PUSH_TAIL(entry, list) \ do { \ (entry)->next = (osal_list_entry_t *)0; \ (entry)->prev = (list)->tail; \ if ((list)->tail) { \ (list)->tail->next = (entry); \ } else { \ (list)->head = (entry); \ } \ (list)->tail = (entry); \ (list)->cnt++; \ } while (0) #define OSAL_LIST_FIRST_ENTRY(list, type, field) \ (type *)((list)->head) #define OSAL_LIST_REMOVE_ENTRY(entry, list) \ do { \ if ((list)->head == (entry)) { \ if ((list)->head) { \ (list)->head = (list)->head->next; \ if ((list)->head) { \ (list)->head->prev = (osal_list_entry_t *)0; \ } else { \ (list)->tail = (osal_list_entry_t *)0; \ } \ (list)->cnt--; \ } \ } else if ((list)->tail == (entry)) { \ if ((list)->tail) { \ (list)->tail = (list)->tail->prev; \ if ((list)->tail) { \ (list)->tail->next = (osal_list_entry_t *)0; \ } else { \ (list)->head = (osal_list_entry_t *)0; \ } \ (list)->cnt--; \ } \ } else { \ (entry)->prev->next = (entry)->next; \ (entry)->next->prev = (entry)->prev; \ (list)->cnt--; \ } \ } while (0) #define OSAL_LIST_IS_EMPTY(list) \ ((list)->cnt == 0) #define OSAL_LIST_NEXT(entry, field, type) \ (type *)((&((entry)->field))->next) #define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \ for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \ entry; \ entry = OSAL_LIST_NEXT(entry, field, type)) #define 
OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \ for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \ tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL; \ entry != NULL; \ entry = (type *)tmp_entry, \ tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL) #define OSAL_BAR_SIZE(dev, bar_id) qlnx_pci_bus_get_bar_size(dev, bar_id) #define OSAL_PCI_READ_CONFIG_BYTE(dev, reg, value) \ qlnx_pci_read_config_byte(dev, reg, value); #define OSAL_PCI_READ_CONFIG_WORD(dev, reg, value) \ qlnx_pci_read_config_word(dev, reg, value); #define OSAL_PCI_READ_CONFIG_DWORD(dev, reg, value) \ qlnx_pci_read_config_dword(dev, reg, value); #define OSAL_PCI_WRITE_CONFIG_BYTE(dev, reg, value) \ qlnx_pci_write_config_byte(dev, reg, value); #define OSAL_PCI_WRITE_CONFIG_WORD(dev, reg, value) \ qlnx_pci_write_config_word(dev, reg, value); #define OSAL_PCI_WRITE_CONFIG_DWORD(dev, reg, value) \ qlnx_pci_write_config_dword(dev, reg, value); #define OSAL_PCI_FIND_CAPABILITY(dev, cap) qlnx_pci_find_capability(dev, cap) #define OSAL_PCI_FIND_EXT_CAPABILITY(dev, ext_cap) \ qlnx_pci_find_ext_capability(dev, ext_cap) #define OSAL_MMIOWB(dev) qlnx_barrier(dev) #define OSAL_BARRIER(dev) qlnx_barrier(dev) #define OSAL_SMP_MB(dev) mb() #define OSAL_SMP_RMB(dev) rmb() #define OSAL_SMP_WMB(dev) wmb() #define OSAL_RMB(dev) rmb() #define OSAL_WMB(dev) wmb() #define OSAL_DMA_SYNC(dev, addr, length, is_post) #define OSAL_FIND_FIRST_BIT find_first_bit #define OSAL_SET_BIT(bit, bitmap) bit_set((bitstr_t *)bitmap, bit) #define OSAL_CLEAR_BIT(bit, bitmap) bit_clear((bitstr_t *)bitmap, bit) #define OSAL_TEST_BIT(bit, bitmap) bit_test((bitstr_t *)bitmap, bit) #define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \ find_first_zero_bit(bitmap, length) #define OSAL_LINK_UPDATE(hwfn, ptt) qlnx_link_update(hwfn) #define QLNX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #define QLNX_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define OSAL_NUM_ACTIVE_CPU() mp_ncpus #ifndef DIV_ROUND_UP #define DIV_ROUND_UP(size, to_what) QLNX_DIV_ROUND_UP((size), (to_what)) #endif #define ROUNDUP(value, to_what) QLNX_ROUNDUP((value), (to_what)) #define OSAL_ROUNDUP_POW_OF_TWO(val) roundup_pow_of_two((val)) static __inline uint32_t qlnx_log2(uint32_t x) { uint32_t log = 0; while (x >>= 1) log++; return (log); } #define OSAL_LOG2(val) qlnx_log2(val) #define OFFSETOF(str, field) offsetof(str, field) #define PRINT device_printf #define PRINT_ERR device_printf #define OSAL_ASSERT(is_assert) nothing #define OSAL_BEFORE_PF_START(cdev, my_id) {}; #define OSAL_AFTER_PF_STOP(cdev, my_id) {}; #define INLINE __inline #define OSAL_INLINE __inline #define OSAL_UNLIKELY #define OSAL_NULL NULL #define OSAL_MAX_T(type, __max1, __max2) max_t(type, __max1, __max2) #define OSAL_MIN_T(type, __max1, __max2) min_t(type, __max1, __max2) #define __iomem #define OSAL_IOMEM #define int_ptr_t void * #define osal_int_ptr_t void * #define OSAL_BUILD_BUG_ON(cond) nothing #define REG_ADDR(hwfn, offset) (void *)((u8 *)(hwfn->regview) + (offset)) #define OSAL_REG_ADDR(hwfn, offset) (void *)((u8 *)(hwfn->regview) + (offset)) #define OSAL_PAGE_SIZE PAGE_SIZE #define OSAL_STRCPY(dst, src) strcpy(dst, src) #define OSAL_STRNCPY(dst, src, bytes) strncpy(dst, src, bytes) #define OSAL_STRLEN(src) strlen(src) #define OSAL_SPRINTF sprintf #define OSAL_SNPRINTF snprintf #define OSAL_MEMSET memset #define OSAL_ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) #define osal_uintptr_t u64 #define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0) #define OSAL_GET_PROTOCOL_STATS(p_hwfn, 
type, stats) \ qlnx_get_protocol_stats(p_hwfn, type, stats); #define OSAL_POLL_MODE_DPC(hwfn) {if (cold) qlnx_sp_isr(hwfn);} #define OSAL_WARN(cond, fmt, args...) \ if (cond) printf("%s: WARNING: " fmt, __func__, ## args); #define OSAL_BITMAP_WEIGHT(bitmap, nbits) bitmap_weight(bitmap, nbits) #define OSAL_GET_RDMA_SB_ID(p_hwfn, cnq_id) ecore_rdma_get_sb_id(p_hwfn, cnq_id) static inline int qlnx_test_and_change_bit(long bit, volatile unsigned long *var) { long val; var += BIT_WORD(bit); bit %= BITS_PER_LONG; bit = (1UL << bit); val = *var; if (val & bit) return (test_and_clear_bit(bit, var)); return (test_and_set_bit(bit, var)); } #define OSAL_TEST_AND_FLIP_BIT qlnx_test_and_change_bit #define OSAL_TEST_AND_CLEAR_BIT test_and_clear_bit #define OSAL_MEMCMP memcmp #define OSAL_SPIN_LOCK_IRQSAVE(x, y) { (void)y; mtx_lock(x); } #define OSAL_SPIN_UNLOCK_IRQSAVE(x, y) { (void)y; mtx_unlock(x); } static inline u32 OSAL_CRC32(u32 crc, u8 *ptr, u32 length) { int i; while (length--) { crc ^= *ptr++; for (i = 0; i < 8; i++) crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0); } return crc; } static inline void OSAL_CRC8_POPULATE(u8 * cdu_crc8_table, u8 polynomial) { return; } static inline u8 OSAL_CRC8(u8 * cdu_crc8_table, u8 * data_to_crc, int data_to_crc_len, u8 init_value) { return ECORE_NOTIMPL; } #define OSAL_HW_INFO_CHANGE(p_hwfn, offset) #define OSAL_MFW_TLV_REQ(p_hwfn) #define OSAL_LLDP_RX_TLVS(p_hwfn, buffer, len) #define OSAL_MFW_CMD_PREEMPT(p_hwfn) #define OSAL_TRANSCEIVER_UPDATE(p_hwfn) #define OSAL_MFW_FILL_TLV_DATA(p_hwfn, group, data) (0) #define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, res) (0) #define OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, req, vf_sw_info) \ qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, req, vf_sw_info) #define OSAL_IOV_PF_RESP_TYPE(p_hwfn, relative_vf_id, status) #define OSAL_IOV_VF_CLEANUP(p_hwfn, relative_vf_id) \ qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id) #define OSAL_IOV_VF_ACQUIRE(p_hwfn, relative_vf_id) ECORE_SUCCESS #define OSAL_IOV_GET_OS_TYPE() VFPF_ACQUIRE_OS_FREEBSD #define OSAL_IOV_PRE_START_VPORT(p_hwfn, relative_vf_id, params) ECORE_SUCCESS #define OSAL_IOV_POST_START_VPORT(p_hwfn, relative_vf_id, vport_id, opaque_fid) #define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, x, y, z) ECORE_SUCCESS #define OSAL_IOV_CHK_UCAST(p_hwfn, vfid, params) \ qlnx_iov_chk_ucast(p_hwfn, vfid, params); #define OSAL_PF_VF_MALICIOUS(p_hwfn, relative_vf_id) #define OSAL_IOV_VF_MSG_TYPE(p_hwfn, relative_vf_id, type) #define OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vfid, params, tlvs) \ qlnx_iov_update_vport(p_hwfn, vfid, params, tlvs) #define OSAL_PF_VF_MSG(p_hwfn, relative_vf_id) \ qlnx_pf_vf_msg(p_hwfn, relative_vf_id) #define OSAL_VF_FLR_UPDATE(p_hwfn) qlnx_vf_flr_update(p_hwfn) #define OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf) #endif /* #ifdef __BCM_OSAL_ECORE_PACKAGE */ diff --git a/sys/dev/qlnx/qlnxe/ecore.h b/sys/dev/qlnx/qlnxe/ecore.h index 8fcbc1f8d8a0..eda7c260ba99 100644 --- a/sys/dev/qlnx/qlnxe/ecore.h +++ b/sys/dev/qlnx/qlnxe/ecore.h @@ -1,1074 +1,1075 @@ /* * Copyright (c) 2017-2018 Cavium, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #ifndef __ECORE_H #define __ECORE_H #include "ecore_status.h" #include "ecore_hsi_common.h" #include "ecore_hsi_debug_tools.h" #include "ecore_hsi_init_func.h" #include "ecore_hsi_init_tool.h" #include "ecore_proto_if.h" #include "mcp_public.h" #define ECORE_MAJOR_VERSION 8 #define ECORE_MINOR_VERSION 33 #define ECORE_REVISION_VERSION 5 #define ECORE_ENGINEERING_VERSION 0 #define ECORE_VERSION \ ((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \ (ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION) #define STORM_FW_VERSION \ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) #define MAX_HWFNS_PER_DEVICE 2 #define NAME_SIZE 16 #define ARRAY_DECL static const #define ECORE_WFQ_UNIT 100 /* Constants */ #define ECORE_WID_SIZE (1024) #define ECORE_MIN_WIDS (4) /* Configurable */ #define ECORE_PF_DEMS_SIZE (4) /* cau states */ enum ecore_coalescing_mode { ECORE_COAL_MODE_DISABLE, ECORE_COAL_MODE_ENABLE }; enum ecore_nvm_cmd { ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA, ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM, ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM, ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE, ECORE_EXT_PHY_FW_UPGRADE = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE, ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE, ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ, ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE, ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ, ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE, ECORE_ENCRYPT_PASSWORD = DRV_MSG_CODE_ENCRYPT_PASSWORD, ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00 }; #ifndef LINUX_REMOVE #if !defined(CONFIG_ECORE_L2) && !defined(CONFIG_ECORE_ROCE) && \ !defined(CONFIG_ECORE_FCOE) && !defined(CONFIG_ECORE_ISCSI) && \ !defined(CONFIG_ECORE_IWARP) #define CONFIG_ECORE_L2 #define CONFIG_ECORE_SRIOV #define CONFIG_ECORE_ROCE #define CONFIG_ECORE_IWARP #define CONFIG_ECORE_LL2 #define CONFIG_ECORE_RDMA #define ECORE_CONFIG_DIRECT_HWFN #define QLNX_ENABLE_IWARP #endif #endif /* helpers */ #ifndef __EXTRACT__LINUX__IF__ #define MASK_FIELD(_name, _value) \ ((_value) &= (_name##_MASK)) #define FIELD_VALUE(_name, _value) \ ((_value & _name##_MASK) << _name##_SHIFT) #define SET_FIELD(value, name, flag) \ do { \ (value) &= ~(name##_MASK << name##_SHIFT); \ (value) |= ((((u64)flag) & (u64)name##_MASK) << (name##_SHIFT));\ } while (0) #define GET_FIELD(value, name) \ (((value) >> (name##_SHIFT)) & name##_MASK) #define GET_MFW_FIELD(name, 
field) \ (((name) & (field ## _MASK)) >> (field ## _OFFSET)) #define SET_MFW_FIELD(name, field, value) \ do { \ (name) &= ~(field ## _MASK); \ (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \ } while (0) #endif static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS) { u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | (cid * ECORE_PF_DEMS_SIZE); return db_addr; } static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS) { u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); return db_addr; } #define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \ ((sizeof(type_name) + (u32)(1<<(p_hwfn->p_dev->cache_shift))-1) & \ ~((1<<(p_hwfn->p_dev->cache_shift))-1)) #ifndef LINUX_REMOVE #ifndef U64_HI #define U64_HI(val) ((u32)(((u64)(val)) >> 32)) #endif #ifndef U64_LO #define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff)) #endif #endif #ifndef __EXTRACT__LINUX__IF__ #ifndef UEFI /* Debug print definitions */ #define DP_ERR(p_dev, fmt, ...) \ do { \ PRINT_ERR((p_dev)->dp_ctx, "[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ (p_dev)->name ? (p_dev)->name : "", \ ##__VA_ARGS__); \ } while (0) #define DP_NOTICE(p_dev, is_assert, fmt, ...) \ do { \ if (OSAL_UNLIKELY((p_dev)->dp_level <= ECORE_LEVEL_NOTICE)) { \ PRINT((p_dev)->dp_ctx, "[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ (p_dev)->name ? (p_dev)->name : "", \ ##__VA_ARGS__); \ OSAL_ASSERT(!is_assert); \ } \ } while (0) #define DP_INFO(p_dev, fmt, ...) \ do { \ if (OSAL_UNLIKELY((p_dev)->dp_level <= ECORE_LEVEL_INFO)) { \ PRINT((p_dev)->dp_ctx, "[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ (p_dev)->name ? (p_dev)->name : "", \ ##__VA_ARGS__); \ } \ } while (0) #define DP_VERBOSE(p_dev, module, fmt, ...) \ do { \ if (OSAL_UNLIKELY(((p_dev)->dp_level <= ECORE_LEVEL_VERBOSE) && \ ((p_dev)->dp_module & module))) { \ PRINT((p_dev)->dp_ctx, "[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ (p_dev)->name ? (p_dev)->name : "", \ ##__VA_ARGS__); \ } \ } while (0) #endif enum DP_LEVEL { ECORE_LEVEL_VERBOSE = 0x0, ECORE_LEVEL_INFO = 0x1, ECORE_LEVEL_NOTICE = 0x2, ECORE_LEVEL_ERR = 0x3, }; #define ECORE_LOG_LEVEL_SHIFT (30) #define ECORE_LOG_VERBOSE_MASK (0x3fffffff) #define ECORE_LOG_INFO_MASK (0x40000000) #define ECORE_LOG_NOTICE_MASK (0x80000000) enum DP_MODULE { #ifndef LINUX_REMOVE ECORE_MSG_DRV = 0x0001, ECORE_MSG_PROBE = 0x0002, ECORE_MSG_LINK = 0x0004, ECORE_MSG_TIMER = 0x0008, ECORE_MSG_IFDOWN = 0x0010, ECORE_MSG_IFUP = 0x0020, ECORE_MSG_RX_ERR = 0x0040, ECORE_MSG_TX_ERR = 0x0080, ECORE_MSG_TX_QUEUED = 0x0100, ECORE_MSG_INTR = 0x0200, ECORE_MSG_TX_DONE = 0x0400, ECORE_MSG_RX_STATUS = 0x0800, ECORE_MSG_PKTDATA = 0x1000, ECORE_MSG_HW = 0x2000, ECORE_MSG_WOL = 0x4000, #endif ECORE_MSG_SPQ = 0x10000, ECORE_MSG_STATS = 0x20000, ECORE_MSG_DCB = 0x40000, ECORE_MSG_IOV = 0x80000, ECORE_MSG_SP = 0x100000, ECORE_MSG_STORAGE = 0x200000, ECORE_MSG_OOO = 0x200000, ECORE_MSG_CXT = 0x800000, ECORE_MSG_LL2 = 0x1000000, ECORE_MSG_ILT = 0x2000000, ECORE_MSG_RDMA = 0x4000000, ECORE_MSG_DEBUG = 0x8000000, /* to be added...up to 0x8000000 */ }; #endif #define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++) #define D_TRINE(val, cond1, cond2, true1, true2, def) \ (val == (cond1) ? true1 : \ (val == (cond2) ? 
true2 : def)) /* forward */ struct ecore_ptt_pool; struct ecore_spq; struct ecore_sb_info; struct ecore_sb_attn_info; struct ecore_cxt_mngr; struct ecore_dma_mem; struct ecore_sb_sp_info; struct ecore_ll2_info; struct ecore_l2_info; struct ecore_igu_info; struct ecore_mcp_info; struct ecore_dcbx_info; struct ecore_llh_info; struct ecore_rt_data { u32 *init_val; bool *b_valid; }; enum ecore_tunn_mode { ECORE_MODE_L2GENEVE_TUNN, ECORE_MODE_IPGENEVE_TUNN, ECORE_MODE_L2GRE_TUNN, ECORE_MODE_IPGRE_TUNN, ECORE_MODE_VXLAN_TUNN, }; enum ecore_tunn_clss { ECORE_TUNN_CLSS_MAC_VLAN, ECORE_TUNN_CLSS_MAC_VNI, ECORE_TUNN_CLSS_INNER_MAC_VLAN, ECORE_TUNN_CLSS_INNER_MAC_VNI, ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE, MAX_ECORE_TUNN_CLSS, }; struct ecore_tunn_update_type { bool b_update_mode; bool b_mode_enabled; enum ecore_tunn_clss tun_cls; }; struct ecore_tunn_update_udp_port { bool b_update_port; u16 port; }; struct ecore_tunnel_info { struct ecore_tunn_update_type vxlan; struct ecore_tunn_update_type l2_geneve; struct ecore_tunn_update_type ip_geneve; struct ecore_tunn_update_type l2_gre; struct ecore_tunn_update_type ip_gre; struct ecore_tunn_update_udp_port vxlan_port; struct ecore_tunn_update_udp_port geneve_port; bool b_update_rx_cls; bool b_update_tx_cls; }; /* The PCI personality is not quite synonymous to protocol ID: * 1. All personalities need CORE connections * 2. The Ethernet personality may support also the RoCE/iWARP protocol */ enum ecore_pci_personality { ECORE_PCI_ETH, ECORE_PCI_FCOE, ECORE_PCI_ISCSI, ECORE_PCI_ETH_ROCE, ECORE_PCI_ETH_IWARP, ECORE_PCI_ETH_RDMA, ECORE_PCI_DEFAULT /* default in shmem */ }; /* All VFs are symetric, all counters are PF + all VFs */ struct ecore_qm_iids { u32 cids; u32 vf_cids; u32 tids; }; /* The PCI relax ordering is either taken care by management FW or can be * enable/disable by ecore client. */ enum ecore_pci_rlx_odr { ECORE_DEFAULT_RLX_ODR, ECORE_ENABLE_RLX_ODR, ECORE_DISABLE_RLX_ODR }; #define MAX_PF_PER_PORT 8 /* HW / FW resources, output of features supported below, most information * is received from MFW. */ enum ecore_resources { ECORE_L2_QUEUE, ECORE_VPORT, ECORE_RSS_ENG, ECORE_PQ, ECORE_RL, ECORE_MAC, ECORE_VLAN, ECORE_RDMA_CNQ_RAM, ECORE_ILT, ECORE_LL2_QUEUE, ECORE_CMDQS_CQS, ECORE_RDMA_STATS_QUEUE, ECORE_BDQ, /* This is needed only internally for matching against the IGU. * In case of legacy MFW, would be set to `0'. 
*/ ECORE_SB, ECORE_MAX_RESC, }; /* Features that require resources, given as input to the resource management * algorithm, the output are the resources above */ enum ecore_feature { ECORE_PF_L2_QUE, ECORE_PF_TC, ECORE_VF, ECORE_EXTRA_VF_QUE, ECORE_VMQ, ECORE_RDMA_CNQ, ECORE_ISCSI_CQ, ECORE_FCOE_CQ, ECORE_VF_L2_QUE, ECORE_MAX_FEATURES, }; enum ecore_port_mode { ECORE_PORT_MODE_DE_2X40G, ECORE_PORT_MODE_DE_2X50G, ECORE_PORT_MODE_DE_1X100G, ECORE_PORT_MODE_DE_4X10G_F, ECORE_PORT_MODE_DE_4X10G_E, ECORE_PORT_MODE_DE_4X20G, ECORE_PORT_MODE_DE_1X40G, ECORE_PORT_MODE_DE_2X25G, ECORE_PORT_MODE_DE_1X25G, ECORE_PORT_MODE_DE_4X25G, ECORE_PORT_MODE_DE_2X10G, }; enum ecore_dev_cap { ECORE_DEV_CAP_ETH, ECORE_DEV_CAP_FCOE, ECORE_DEV_CAP_ISCSI, ECORE_DEV_CAP_ROCE, ECORE_DEV_CAP_IWARP }; #ifndef __EXTRACT__LINUX__IF__ enum ecore_hw_err_type { ECORE_HW_ERR_FAN_FAIL, ECORE_HW_ERR_MFW_RESP_FAIL, ECORE_HW_ERR_HW_ATTN, ECORE_HW_ERR_DMAE_FAIL, ECORE_HW_ERR_RAMROD_FAIL, ECORE_HW_ERR_FW_ASSERT, }; #endif enum ecore_wol_support { ECORE_WOL_SUPPORT_NONE, ECORE_WOL_SUPPORT_PME, }; enum ecore_db_rec_exec { DB_REC_DRY_RUN, DB_REC_REAL_DEAL, DB_REC_ONCE, }; struct ecore_hw_info { /* PCI personality */ enum ecore_pci_personality personality; #define ECORE_IS_RDMA_PERSONALITY(dev) \ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \ (dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA) #define ECORE_IS_ROCE_PERSONALITY(dev) \ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA) #define ECORE_IS_IWARP_PERSONALITY(dev) \ ((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA) #define ECORE_IS_L2_PERSONALITY(dev) \ ((dev)->hw_info.personality == ECORE_PCI_ETH || \ ECORE_IS_RDMA_PERSONALITY(dev)) #define ECORE_IS_FCOE_PERSONALITY(dev) \ ((dev)->hw_info.personality == ECORE_PCI_FCOE) #define ECORE_IS_ISCSI_PERSONALITY(dev) \ ((dev)->hw_info.personality == ECORE_PCI_ISCSI) /* Resource Allocation scheme results */ u32 resc_start[ECORE_MAX_RESC]; u32 resc_num[ECORE_MAX_RESC]; u32 feat_num[ECORE_MAX_FEATURES]; #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) #define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \ RESC_NUM(_p_hwfn, resc)) #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) /* Amount of traffic classes HW supports */ u8 num_hw_tc; /* Amount of TCs which should be active according to DCBx or upper layer driver configuration */ u8 num_active_tc; /* The traffic class used by PF for it's offloaded protocol */ u8 offload_tc; u32 concrete_fid; u16 opaque_fid; u16 ovlan; u32 part_num[4]; #ifndef ETH_ALEN #define ETH_ALEN 6 /* @@@ TBD - define somewhere else for Windows */ #endif unsigned char hw_mac_addr[ETH_ALEN]; u16 num_iscsi_conns; u16 num_fcoe_conns; struct ecore_igu_info *p_igu_info; /* Sriov */ u8 max_chains_per_vf; u32 port_mode; u32 hw_mode; unsigned long device_capabilities; #ifndef __EXTRACT__LINUX__THROW__ /* Default DCBX mode */ u8 dcbx_mode; #endif u16 mtu; enum ecore_wol_support b_wol_support; }; /* maximun size of read/write commands (HW limit) */ #define DMAE_MAX_RW_SIZE 0x2000 struct ecore_dmae_info { /* Spinlock for synchronizing access to functions */ osal_spinlock_t lock; bool b_mem_ready; u8 channel; dma_addr_t completion_word_phys_addr; /* The memory location where the DMAE writes the completion * value when an operation is finished on 
this context. */ u32 *p_completion_word; dma_addr_t intermediate_buffer_phys_addr; /* An intermediate buffer for DMAE operations that use virtual * addresses - data is DMA'd to/from this buffer and then * memcpy'd to/from the virtual address */ u32 *p_intermediate_buffer; dma_addr_t dmae_cmd_phys_addr; struct dmae_cmd *p_dmae_cmd; }; struct ecore_wfq_data { u32 default_min_speed; /* When wfq feature is not configured */ u32 min_speed; /* when feature is configured for any 1 vport */ bool configured; }; struct ecore_qm_info { struct init_qm_pq_params *qm_pq_params; struct init_qm_vport_params *qm_vport_params; struct init_qm_port_params *qm_port_params; u16 start_pq; u8 start_vport; u16 pure_lb_pq; u16 offload_pq; u16 low_latency_pq; u16 pure_ack_pq; u16 ooo_pq; u16 first_vf_pq; u16 first_mcos_pq; u16 first_rl_pq; u16 num_pqs; u16 num_vf_pqs; u8 num_vports; u8 max_phys_tcs_per_port; u8 ooo_tc; bool pf_rl_en; bool pf_wfq_en; bool vport_rl_en; bool vport_wfq_en; u8 pf_wfq; u32 pf_rl; struct ecore_wfq_data *wfq_data; u8 num_pf_rls; }; struct ecore_db_recovery_info { osal_list_t list; osal_spinlock_t lock; u32 db_recovery_counter; }; struct storm_stats { u32 address; u32 len; }; struct ecore_fw_data { #ifdef CONFIG_ECORE_BINARY_FW struct fw_ver_info *fw_ver_info; #endif const u8 *modes_tree_buf; const union init_op *init_ops; const u32 *arr_data; u32 init_ops_size; }; enum ecore_mf_mode_bit { /* Supports PF-classification based on tag */ ECORE_MF_OVLAN_CLSS, /* Supports PF-classification based on MAC */ ECORE_MF_LLH_MAC_CLSS, /* Supports PF-classification based on protocol type */ ECORE_MF_LLH_PROTO_CLSS, /* Requires a default PF to be set */ ECORE_MF_NEED_DEF_PF, /* Allow LL2 to multicast/broadcast */ ECORE_MF_LL2_NON_UNICAST, /* Allow Cross-PF [& child VFs] Tx-switching */ ECORE_MF_INTER_PF_SWITCH, /* TODO - if we ever re-utilize any of this logic, we can rename */ ECORE_MF_UFP_SPECIFIC, ECORE_MF_DISABLE_ARFS, /* Use vlan for steering */ ECORE_MF_8021Q_TAGGING, /* Use stag for steering */ ECORE_MF_8021AD_TAGGING, }; enum ecore_ufp_mode { ECORE_UFP_MODE_ETS, ECORE_UFP_MODE_VNIC_BW, ECORE_UFP_MODE_UNKNOWN }; enum ecore_ufp_pri_type { ECORE_UFP_PRI_OS, ECORE_UFP_PRI_VNIC, ECORE_UFP_PRI_UNKNOWN }; struct ecore_ufp_info { enum ecore_ufp_pri_type pri_type; enum ecore_ufp_mode mode; u8 tc; }; enum BAR_ID { BAR_ID_0, /* used for GRC */ BAR_ID_1 /* Used for doorbells */ }; struct ecore_hwfn { struct ecore_dev *p_dev; u8 my_id; /* ID inside the PF */ #define IS_LEAD_HWFN(edev) (!((edev)->my_id)) u8 rel_pf_id; /* Relative to engine*/ u8 abs_pf_id; #define ECORE_PATH_ID(_p_hwfn) \ (ECORE_IS_BB((_p_hwfn)->p_dev) ? ((_p_hwfn)->abs_pf_id & 1) : 0) u8 port_id; bool b_active; u32 dp_module; u8 dp_level; char name[NAME_SIZE]; void *dp_ctx; bool hw_init_done; u8 num_funcs_on_engine; u8 enabled_func_idx; /* BAR access */ void OSAL_IOMEM *regview; void OSAL_IOMEM *doorbells; u64 db_phys_addr; unsigned long db_size; #ifndef LINUX_REMOVE u64 reg_offset; u64 db_offset; #endif /* PTT pool */ struct ecore_ptt_pool *p_ptt_pool; /* HW info */ struct ecore_hw_info hw_info; /* rt_array (for init-tool) */ struct ecore_rt_data rt_data; /* SPQ */ struct ecore_spq *p_spq; /* EQ */ struct ecore_eq *p_eq; /* Consolidate Q*/ struct ecore_consq *p_consq; /* Slow-Path definitions */ osal_dpc_t sp_dpc; bool b_sp_dpc_enabled; struct ecore_ptt *p_main_ptt; struct ecore_ptt *p_dpc_ptt; /* PTP will be used only by the leading function. * Usage of all PTP-apis should be synchronized as result. 
*/ struct ecore_ptt *p_ptp_ptt; struct ecore_sb_sp_info *p_sp_sb; struct ecore_sb_attn_info *p_sb_attn; /* Protocol related */ bool using_ll2; struct ecore_ll2_info *p_ll2_info; struct ecore_ooo_info *p_ooo_info; struct ecore_iscsi_info *p_iscsi_info; struct ecore_fcoe_info *p_fcoe_info; struct ecore_rdma_info *p_rdma_info; struct ecore_pf_params pf_params; bool b_rdma_enabled_in_prs; u32 rdma_prs_search_reg; struct ecore_cxt_mngr *p_cxt_mngr; /* Flag indicating whether interrupts are enabled or not*/ bool b_int_enabled; bool b_int_requested; /* True if the driver requests for the link */ bool b_drv_link_init; struct ecore_vf_iov *vf_iov_info; struct ecore_pf_iov *pf_iov_info; struct ecore_mcp_info *mcp_info; struct ecore_dcbx_info *p_dcbx_info; struct ecore_ufp_info ufp_info; struct ecore_dmae_info dmae_info; /* QM init */ struct ecore_qm_info qm_info; /* Buffer for unzipping firmware data */ #ifdef CONFIG_ECORE_ZIPPED_FW void *unzip_buf; #endif struct dbg_tools_data dbg_info; /* PWM region specific data */ u16 wid_count; u32 dpi_size; u32 dpi_count; u32 dpi_start_offset; /* this is used to * calculate th * doorbell address */ /* If one of the following is set then EDPM shouldn't be used */ u8 dcbx_no_edpm; u8 db_bar_no_edpm; /* L2-related */ struct ecore_l2_info *p_l2_info; /* Mechanism for recovering from doorbell drop */ struct ecore_db_recovery_info db_recovery_info; }; #ifndef __EXTRACT__LINUX__THROW__ enum ecore_mf_mode { ECORE_MF_DEFAULT, ECORE_MF_OVLAN, ECORE_MF_NPAR, ECORE_MF_UFP, }; #endif #ifndef __EXTRACT__LINUX__IF__ enum ecore_dev_type { ECORE_DEV_TYPE_BB, ECORE_DEV_TYPE_AH, ECORE_DEV_TYPE_E5, }; #endif struct ecore_dev { u32 dp_module; u8 dp_level; char name[NAME_SIZE]; void *dp_ctx; + void *ha; enum ecore_dev_type type; /* Translate type/revision combo into the proper conditions */ #define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB) #define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev)) #ifndef ASIC_ONLY #define ECORE_IS_BB_B0(dev) ((ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) || \ (CHIP_REV_IS_TEDIBEAR(dev))) #else #define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) #endif #define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH) #define ECORE_IS_K2(dev) ECORE_IS_AH(dev) #define ECORE_IS_E4(dev) (ECORE_IS_BB(dev) || ECORE_IS_AH(dev)) #define ECORE_IS_E5(dev) ((dev)->type == ECORE_DEV_TYPE_E5) #define ECORE_E5_MISSING_CODE OSAL_BUILD_BUG_ON(false) u16 vendor_id; u16 device_id; #define ECORE_DEV_ID_MASK 0xff00 #define ECORE_DEV_ID_MASK_BB 0x1600 #define ECORE_DEV_ID_MASK_AH 0x8000 #define ECORE_DEV_ID_MASK_E5 0x8100 u16 chip_num; #define CHIP_NUM_MASK 0xffff #define CHIP_NUM_SHIFT 0 u8 chip_rev; #define CHIP_REV_MASK 0xf #define CHIP_REV_SHIFT 0 #ifndef ASIC_ONLY #define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5) #define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe) #define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc) #define CHIP_REV_IS_EMUL(_p_dev) \ (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_EMUL_B0(_p_dev)) #define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf) #define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd) #define CHIP_REV_IS_FPGA(_p_dev) \ (CHIP_REV_IS_FPGA_A0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev)) #define CHIP_REV_IS_SLOW(_p_dev) \ (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev)) #define CHIP_REV_IS_A0(_p_dev) \ (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_FPGA_A0(_p_dev) || \ (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal)) #define CHIP_REV_IS_B0(_p_dev) \ 
(CHIP_REV_IS_EMUL_B0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev) || \ ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal)) #define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev) #else #define CHIP_REV_IS_A0(_p_dev) \ (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal) #define CHIP_REV_IS_B0(_p_dev) \ ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal) #endif u8 chip_metal; #define CHIP_METAL_MASK 0xff #define CHIP_METAL_SHIFT 0 u8 chip_bond_id; #define CHIP_BOND_ID_MASK 0xff #define CHIP_BOND_ID_SHIFT 0 u8 num_engines; u8 num_ports; u8 num_ports_in_engine; u8 num_funcs_in_port; u8 path_id; unsigned long mf_bits; #ifndef __EXTRACT__LINUX__THROW__ enum ecore_mf_mode mf_mode; #define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT) #define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR) #define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN) #endif int pcie_width; int pcie_speed; /* Add MF related configuration */ u8 mcp_rev; u8 boot_mode; /* WoL related configurations */ u8 wol_config; u8 wol_mac[ETH_ALEN]; u32 int_mode; enum ecore_coalescing_mode int_coalescing_mode; u16 rx_coalesce_usecs; u16 tx_coalesce_usecs; /* Start Bar offset of first hwfn */ void OSAL_IOMEM *regview; void OSAL_IOMEM *doorbells; u64 db_phys_addr; unsigned long db_size; /* PCI */ u8 cache_shift; /* Init */ const struct iro *iro_arr; #define IRO (p_hwfn->p_dev->iro_arr) /* HW functions */ u8 num_hwfns; struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; #define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0]) #define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1) /* Engine affinity */ u8 l2_affin_hint; u8 fir_affin; u8 iwarp_affin; /* Macro for getting the engine-affinitized hwfn for FCoE/iSCSI/RoCE */ #define ECORE_FIR_AFFIN_HWFN(dev) (&dev->hwfns[dev->fir_affin]) /* Macro for getting the engine-affinitized hwfn for iWARP */ #define ECORE_IWARP_AFFIN_HWFN(dev) (&dev->hwfns[dev->iwarp_affin]) /* Generic macro for getting the engine-affinitized hwfn */ #define ECORE_AFFIN_HWFN(dev) \ (ECORE_IS_IWARP_PERSONALITY(ECORE_LEADING_HWFN(dev)) ? \ ECORE_IWARP_AFFIN_HWFN(dev) : \ ECORE_FIR_AFFIN_HWFN(dev)) /* Macro for getting the index (0/1) of the engine-affinitized hwfn */ #define ECORE_AFFIN_HWFN_IDX(dev) \ (IS_LEAD_HWFN(ECORE_AFFIN_HWFN(dev)) ? 0 : 1) /* SRIOV */ struct ecore_hw_sriov_info *p_iov_info; #define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info) struct ecore_tunnel_info tunnel; bool b_is_vf; bool b_dont_override_vf_msix; u32 drv_type; u32 rdma_max_sge; u32 rdma_max_inline; u32 rdma_max_srq_sge; u8 ilt_page_size; struct ecore_eth_stats *reset_stats; struct ecore_fw_data *fw_data; u32 mcp_nvm_resp; /* Recovery */ bool recov_in_prog; /* Indicates whether should prevent attentions from being reasserted */ bool attn_clr_en; /* Indicates whether allowing the MFW to collect a crash dump */ bool allow_mdump; /* Indicates if the reg_fifo is checked after any register access */ bool chk_reg_fifo; #ifndef ASIC_ONLY bool b_is_emul_full; #endif /* LLH info */ u8 ppfid_bitmap; struct ecore_llh_info *p_llh_info; }; #define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \ : MAX_NUM_VFS_K2) #define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \ : MAX_NUM_L2_QUEUES_K2) #define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \ : MAX_NUM_PORTS_K2) #define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \ : MAX_SB_PER_PATH_K2) #define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? 
MAX_NUM_PFS_BB \ : MAX_NUM_PFS_K2) #ifndef LINUX_REMOVE #define CRC8_TABLE_SIZE 256 #endif /** * @brief ecore_concrete_to_sw_fid - get the sw function id from * the concrete value. * * @param concrete_fid * * @return OSAL_INLINE u8 */ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid) { u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID); u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID); u8 sw_fid; if (vf_valid) sw_fid = vfid + MAX_NUM_PFS; else sw_fid = pfid; return sw_fid; } #define PKT_LB_TC 9 #define MAX_NUM_VOQS_E4 20 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate); void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, struct ecore_ptt *p_ptt, u32 min_pf_rate); int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw); int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw); void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); int ecore_device_num_engines(struct ecore_dev *p_dev); int ecore_device_num_ports(struct ecore_dev *p_dev); void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); /* Flags for indication of required queues */ #define PQ_FLAGS_RLS (1 << 0) #define PQ_FLAGS_MCOS (1 << 1) #define PQ_FLAGS_LB (1 << 2) #define PQ_FLAGS_OOO (1 << 3) #define PQ_FLAGS_ACK (1 << 4) #define PQ_FLAGS_OFLD (1 << 5) #define PQ_FLAGS_VFS (1 << 6) #define PQ_FLAGS_LLT (1 << 7) /* physical queue index for cm context intialization */ u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags); u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc); u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf); u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid); const char *ecore_hw_get_resc_name(enum ecore_resources res_id); /* doorbell recovery mechanism */ void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn); void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn, enum ecore_db_rec_exec db_exec); /* amount of resources used in qm init */ u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn); u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn); u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn); u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn); u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn); #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ ecore_device_num_ports((_p_hwfn)->p_dev)) /* The PFID<->PPFID calculation is based on the relative index of a PF on its * port. In BB there is a bug in the LLH in which the PPFID is actually engine * based, and thus it equals the PFID. */ #define ECORE_PFID_BY_PPFID(_p_hwfn, abs_ppfid) \ (ECORE_IS_BB((_p_hwfn)->p_dev) ? \ (abs_ppfid) : \ (abs_ppfid) * (_p_hwfn)->p_dev->num_ports_in_engine + \ MFW_PORT(_p_hwfn)) #define ECORE_PPFID_BY_PFID(_p_hwfn) \ (ECORE_IS_BB((_p_hwfn)->p_dev) ? 
\ (_p_hwfn)->rel_pf_id : \ (_p_hwfn)->rel_pf_id / (_p_hwfn)->p_dev->num_ports_in_engine) enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 addr, u32 val); /* Utility functions for dumping the content of the NIG LLH filters */ enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid); enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev); #endif /* __ECORE_H */ diff --git a/sys/dev/qlnx/qlnxe/ecore_mcp.c b/sys/dev/qlnx/qlnxe/ecore_mcp.c index d94c7382edc5..ab14b1eb5186 100644 --- a/sys/dev/qlnx/qlnxe/ecore_mcp.c +++ b/sys/dev/qlnx/qlnxe/ecore_mcp.c @@ -1,4788 +1,4788 @@ /* * Copyright (c) 2017-2018 Cavium, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * File : ecore_mcp.c */ #include #include "bcm_osal.h" #include "ecore.h" #include "ecore_status.h" #include "nvm_map.h" #include "nvm_cfg.h" #include "ecore_mcp.h" #include "mcp_public.h" #include "reg_addr.h" #include "ecore_hw.h" #include "ecore_init_fw_funcs.h" #include "ecore_sriov.h" #include "ecore_vf.h" #include "ecore_iov_api.h" #include "ecore_gtt_reg_addr.h" #include "ecore_iro.h" #include "ecore_dcbx.h" #include "ecore_sp_commands.h" #include "ecore_cxt.h" #define CHIP_MCP_RESP_ITER_US 10 #define EMUL_MCP_RESP_ITER_US 1000 * 1000 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ #define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ _val) #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ OFFSETOF(struct public_drv_mb, _field), _val) #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ OFFSETOF(struct public_drv_mb, _field)) #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ DRV_ID_PDA_COMP_VER_OFFSET) #define MCP_BYTES_PER_MBIT_OFFSET 17 #ifdef _NTDDK_ #pragma warning(push) #pragma warning(disable : 28167) #pragma warning(disable : 28123) #endif #ifndef ASIC_ONLY static int loaded; static int loaded_port[MAX_NUM_PORTS] = { 0 }; #endif bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn) { if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) return false; return true; } void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PORT); u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr); p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, MFW_PORT(p_hwfn)); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "port_addr = 0x%x, port_id 0x%02x\n", p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); } void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); OSAL_BE32 tmp; u32 i; #ifndef ASIC_ONLY if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev)) return; #endif if (!p_hwfn->mcp_info->public_base) return; for (i = 0; i < length; i++) { tmp = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->mfw_mb_addr + (i << 2) + sizeof(u32)); ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = OSAL_BE32_TO_CPU(tmp); } } struct ecore_mcp_cmd_elem { osal_list_entry_t list; struct ecore_mcp_mb_params *p_mb_params; u16 expected_seq_num; bool b_is_completed; }; /* Must be called while cmd_lock is acquired */ static struct ecore_mcp_cmd_elem * ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn, struct ecore_mcp_mb_params *p_mb_params, u16 expected_seq_num) { struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL; p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_cmd_elem)); if (!p_cmd_elem) { DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_mcp_cmd_elem'\n"); goto out; } p_cmd_elem->p_mb_params = p_mb_params; p_cmd_elem->expected_seq_num = expected_seq_num; OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list); out: return p_cmd_elem; } /* Must be called while cmd_lock is acquired */ static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn, struct ecore_mcp_cmd_elem *p_cmd_elem) { OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list); 
OSAL_FREE(p_hwfn->p_dev, p_cmd_elem); } /* Must be called while cmd_lock is acquired */ static struct ecore_mcp_cmd_elem * ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num) { struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL; OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list, struct ecore_mcp_cmd_elem) { if (p_cmd_elem->expected_seq_num == seq_num) return p_cmd_elem; } return OSAL_NULL; } enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn) { if (p_hwfn->mcp_info) { struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp; OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur); OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow); - OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock); OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp, &p_hwfn->mcp_info->cmd_list, list, struct ecore_mcp_cmd_elem) { ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); } - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock); #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock); - OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock); + OSAL_MUTEX_DEALLOC(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_DEALLOC(&p_hwfn->mcp_info->link_lock); #endif } OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); p_hwfn->mcp_info = OSAL_NULL; return ECORE_SUCCESS; } /* Maximum of 1 sec to wait for the SHMEM ready indication */ #define ECPRE_MCP_SHMEM_RDY_MAX_RETRIES 20 #define ECORE_MCP_SHMEM_RDY_ITER_MS 50 enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_info *p_info = p_hwfn->mcp_info; u8 cnt = ECPRE_MCP_SHMEM_RDY_MAX_RETRIES; u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS; u32 drv_mb_offsize, mfw_mb_offsize; u32 mcp_pf_id = MCP_PF_ID(p_hwfn); #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n"); p_info->public_base = 0; return ECORE_INVAL; } #endif p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); if (!p_info->public_base) return ECORE_INVAL; p_info->public_base |= GRCBASE_MCP; /* Get the MFW MB address and number of supported messages */ mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(p_info->public_base, PUBLIC_MFW_MB)); p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); /* @@@TBD: * The driver can notify that there was an MCP reset, and read the SHMEM * values before the MFW has completed initializing them. * As a temporary solution, the "sup_msgs" field is used as a data ready * indication. * This should be replaced with an actual indication when it is provided * by the MFW. 
*/ while (!p_info->mfw_mb_length && cnt--) { OSAL_MSLEEP(msec); p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); } if (!cnt) { DP_NOTICE(p_hwfn, false, "Failed to get the SHMEM ready notification after %d msec\n", ECPRE_MCP_SHMEM_RDY_MAX_RETRIES * msec); return ECORE_TIMEOUT; } /* Calculate the driver and MFW mailbox address */ drv_mb_offsize = ecore_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(p_info->public_base, PUBLIC_DRV_MB)); p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); /* Get the current driver mailbox sequence before sending * the first command */ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; /* Get current FW pulse sequence */ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & DRV_PULSE_SEQ_MASK; p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_info *p_info; u32 size; /* Allocate mcp_info structure */ p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_hwfn->mcp_info)); if (!p_hwfn->mcp_info) { DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n"); return ECORE_NOMEM; } p_info = p_hwfn->mcp_info; /* Initialize the MFW spinlocks */ #ifdef CONFIG_ECORE_LOCK_ALLOC - if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) { + if (OSAL_MUTEX_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) { OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); return ECORE_NOMEM; } - if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) { - OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock); + if (OSAL_MUTEX_ALLOC(p_hwfn, &p_info->link_lock)) { + OSAL_MUTEX_DEALLOC(&p_info->cmd_lock); OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); return ECORE_NOMEM; } #endif - OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock); - OSAL_SPIN_LOCK_INIT(&p_info->link_lock); + OSAL_MUTEX_INIT(&p_info->cmd_lock); + OSAL_MUTEX_INIT(&p_info->link_lock); OSAL_LIST_INIT(&p_info->cmd_list); if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "MCP is not initialized\n"); /* Do not free mcp_info here, since public_base indicate that * the MCP is not initialized */ return ECORE_SUCCESS; } size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size); p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size); if (p_info->mfw_mb_cur == OSAL_NULL || p_info->mfw_mb_shadow == OSAL_NULL) goto err; return ECORE_SUCCESS; err: DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n"); ecore_mcp_free(p_hwfn); return ECORE_NOMEM; } static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); /* Use MCP history register to check if MCP reset occurred between init * time and now. 
*/ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) { DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n", p_hwfn->mcp_info->mcp_hist, generic_por_0); ecore_load_mcp_offsets(p_hwfn, p_ptt); ecore_mcp_cmd_port_init(p_hwfn, p_ptt); } } enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; enum _ecore_status_t rc = ECORE_SUCCESS; #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) delay = EMUL_MCP_RESP_ITER_US; #endif if (p_hwfn->mcp_info->b_block_cmd) { DP_NOTICE(p_hwfn, false, "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n"); return ECORE_ABORTED; } /* Ensure that only a single thread is accessing the mailbox */ - OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock); org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); /* Set drv command along with the updated sequence */ ecore_mcp_reread_offsets(p_hwfn, p_ptt); seq = ++p_hwfn->mcp_info->drv_mb_seq; DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq)); do { /* Wait for MFW response */ OSAL_UDELAY(delay); /* Give the FW up to 500 second (50*1000*10usec) */ } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) && (cnt++ < ECORE_MCP_RESET_RETRIES)); if (org_mcp_reset_seq != ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "MCP was reset after %d usec\n", cnt * delay); } else { DP_ERR(p_hwfn, "Failed to reset MCP\n"); rc = ECORE_AGAIN; } - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock); return rc; } /* Must be called while cmd_lock is acquired */ static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn) { struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL; /* There is at most one pending command at a certain time, and if it * exists - it is placed at the HEAD of the list. 
*/ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) { p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list, struct ecore_mcp_cmd_elem, list); return !p_cmd_elem->b_is_completed; } return false; } /* Must be called while cmd_lock is acquired */ static enum _ecore_status_t ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_mb_params *p_mb_params; struct ecore_mcp_cmd_elem *p_cmd_elem; u32 mcp_resp; u16 seq_num; mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK); /* Return if no new non-handled response has been received */ if (seq_num != p_hwfn->mcp_info->drv_mb_seq) return ECORE_AGAIN; p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num); if (!p_cmd_elem) { DP_ERR(p_hwfn, "Failed to find a pending mailbox cmd that expects sequence number %d\n", seq_num); return ECORE_UNKNOWN_ERROR; } p_mb_params = p_cmd_elem->p_mb_params; /* Get the MFW response along with the sequence number */ p_mb_params->mcp_resp = mcp_resp; /* Get the MFW param */ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); /* Get the union data */ if (p_mb_params->p_data_dst != OSAL_NULL && p_mb_params->data_dst_size) { u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + OFFSETOF(struct public_drv_mb, union_data); ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, union_data_addr, p_mb_params->data_dst_size); } p_cmd_elem->b_is_completed = true; return ECORE_SUCCESS; } /* Must be called while cmd_lock is acquired */ static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_mb_params *p_mb_params, u16 seq_num) { union drv_union_data union_data; u32 union_data_addr; /* Set the union data */ union_data_addr = p_hwfn->mcp_info->drv_mb_addr + OFFSETOF(struct public_drv_mb, union_data); OSAL_MEM_ZERO(&union_data, sizeof(union_data)); if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size) OSAL_MEMCPY(&union_data, p_mb_params->p_data_src, p_mb_params->data_src_size); ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, sizeof(union_data)); /* Set the drv param */ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param); /* Set the drv command along with the sequence number */ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num)); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "MFW mailbox: command 0x%08x param 0x%08x\n", (p_mb_params->cmd | seq_num), p_mb_params->param); } static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn, bool block_cmd) { p_hwfn->mcp_info->b_block_cmd = block_cmd; DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", block_cmd ? 
"Block" : "Unblock"); } static void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); OSAL_UDELAY(CHIP_MCP_RESP_ITER_US); cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); OSAL_UDELAY(CHIP_MCP_RESP_ITER_US); cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); DP_NOTICE(p_hwfn, false, "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); } static enum _ecore_status_t _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_mb_params *p_mb_params, u32 max_retries, u32 usecs) { u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); struct ecore_mcp_cmd_elem *p_cmd_elem; u16 seq_num; enum _ecore_status_t rc = ECORE_SUCCESS; /* Wait until the mailbox is non-occupied */ do { /* Exit the loop if there is no pending command, or if the * pending command is completed during this iteration. * The spinlock stays locked until the command is sent. */ - OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock); if (!ecore_mcp_has_pending_cmd(p_hwfn)) break; rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt); if (rc == ECORE_SUCCESS) break; else if (rc != ECORE_AGAIN) goto err; - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock); if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { OSAL_MSLEEP(msecs); } else { OSAL_UDELAY(usecs); } OSAL_MFW_CMD_PREEMPT(p_hwfn); } while (++cnt < max_retries); if (cnt >= max_retries) { DP_NOTICE(p_hwfn, false, "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); return ECORE_AGAIN; } /* Send the mailbox command */ ecore_mcp_reread_offsets(p_hwfn, p_ptt); seq_num = ++p_hwfn->mcp_info->drv_mb_seq; p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num); if (!p_cmd_elem) { rc = ECORE_NOMEM; goto err; } __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num); - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock); /* Wait for the MFW response */ do { /* Exit the loop if the command is already completed, or if the * command is completed during this iteration. * The spinlock stays locked until the list element is removed. 
*/ if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { OSAL_MSLEEP(msecs); } else { OSAL_UDELAY(usecs); } - OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock); if (p_cmd_elem->b_is_completed) break; rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt); if (rc == ECORE_SUCCESS) break; else if (rc != ECORE_AGAIN) goto err; - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock); OSAL_MFW_CMD_PREEMPT(p_hwfn); } while (++cnt < max_retries); if (cnt >= max_retries) { DP_NOTICE(p_hwfn, false, "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); ecore_mcp_print_cpu_info(p_hwfn, p_ptt); - OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock); ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock); if (!ECORE_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) ecore_mcp_cmd_set_blocking(p_hwfn, true); ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL); return ECORE_AGAIN; } ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", p_mb_params->mcp_resp, p_mb_params->mcp_param, (cnt * usecs) / 1000, (cnt * usecs) % 1000); /* Clear the sequence number from the MFW response */ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; return ECORE_SUCCESS; err: - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock); return rc; } static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_mb_params *p_mb_params) { osal_size_t union_data_size = sizeof(union drv_union_data); u32 max_retries = ECORE_DRV_MB_MAX_RETRIES; u32 usecs = CHIP_MCP_RESP_ITER_US; #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) usecs = EMUL_MCP_RESP_ITER_US; /* There is a built-in delay of 100usec in each MFW response read */ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) max_retries /= 10; #endif if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { max_retries = DIV_ROUND_UP(max_retries, 1000); usecs *= 1000; } /* MCP not initialized */ if (!ecore_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n"); return ECORE_BUSY; } if (p_mb_params->data_src_size > union_data_size || p_mb_params->data_dst_size > union_data_size) { DP_ERR(p_hwfn, "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n", p_mb_params->data_src_size, p_mb_params->data_dst_size, union_data_size); return ECORE_INVAL; } if (p_hwfn->mcp_info->b_block_cmd) { DP_NOTICE(p_hwfn, false, "The MFW is not responsive. 
Avoid sending mailbox command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); return ECORE_ABORTED; } return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, usecs); } enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param) { struct ecore_mcp_mb_params mb_params; enum _ecore_status_t rc; #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { if (cmd == DRV_MSG_CODE_UNLOAD_REQ) { loaded--; loaded_port[p_hwfn->port_id]--; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", loaded); } return ECORE_SUCCESS; } #endif OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.param = param; rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; *o_mcp_resp = mb_params.mcp_resp; *o_mcp_param = mb_params.mcp_param; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) { struct ecore_mcp_mb_params mb_params; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.param = param; mb_params.p_data_src = i_buf; mb_params.data_src_size = (u8) i_txn_size; rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; *o_mcp_resp = mb_params.mcp_resp; *o_mcp_param = mb_params.mcp_param; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf) { struct ecore_mcp_mb_params mb_params; u8 raw_data[MCP_DRV_NVM_BUF_LEN]; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.param = param; mb_params.p_data_dst = raw_data; /* Use the maximal value since the actual one is part of the response */ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; *o_mcp_resp = mb_params.mcp_resp; *o_mcp_param = mb_params.mcp_param; *o_txn_size = *o_mcp_param; OSAL_MEMCPY(o_buf, raw_data, *o_txn_size); return ECORE_SUCCESS; } #ifndef ASIC_ONLY static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn, u32 *p_load_code) { static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE; if (!loaded) { load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE; } else if (!loaded_port[p_hwfn->port_id]) { load_phase = FW_MSG_CODE_DRV_LOAD_PORT; } else { load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION; } /* On CMT, always tell that it's engine */ if (ECORE_IS_CMT(p_hwfn->p_dev)) load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE; *p_load_code = load_phase; loaded++; loaded_port[p_hwfn->port_id]++; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n", *p_load_code, loaded, p_hwfn->port_id, loaded_port[p_hwfn->port_id]); } #endif static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role, enum ecore_override_force_load override_force_load) { bool can_force_load = false; switch (override_force_load) { case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS: can_force_load = true; break; case ECORE_OVERRIDE_FORCE_LOAD_NEVER: can_force_load = false; break; default: can_force_load = (drv_role == DRV_ROLE_OS && exist_drv_role == DRV_ROLE_PREBOOT) || (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS); break; } return can_force_load; } enum _ecore_status_t 
ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 resp = 0, param = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0, &resp, &param); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "Failed to send cancel load request, rc = %d\n", rc); return rc; } if (resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The cancel load command is unsupported by the MFW\n"); return ECORE_NOTIMPL; } return ECORE_SUCCESS; } #define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0) #define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1) #define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2) #define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3) #define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4) #define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5) #define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6) static u32 ecore_get_config_bitmap(void) { u32 config_bitmap = 0x0; #ifdef CONFIG_ECORE_L2 config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX; #endif #ifdef CONFIG_ECORE_SRIOV config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX; #endif #ifdef CONFIG_ECORE_ROCE config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX; #endif #ifdef CONFIG_ECORE_IWARP config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX; #endif #ifdef CONFIG_ECORE_FCOE config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX; #endif #ifdef CONFIG_ECORE_ISCSI config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX; #endif #ifdef CONFIG_ECORE_LL2 config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX; #endif return config_bitmap; } struct ecore_load_req_in_params { u8 hsi_ver; #define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0 #define ECORE_LOAD_REQ_HSI_VER_1 1 u32 drv_ver_0; u32 drv_ver_1; u32 fw_ver; u8 drv_role; u8 timeout_val; u8 force_cmd; bool avoid_eng_reset; }; struct ecore_load_req_out_params { u32 load_code; u32 exist_drv_ver_0; u32 exist_drv_ver_1; u32 exist_fw_ver; u8 exist_drv_role; u8 mfw_hsi_ver; bool drv_exists; }; static enum _ecore_status_t __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_load_req_in_params *p_in_params, struct ecore_load_req_out_params *p_out_params) { struct ecore_mcp_mb_params mb_params; struct load_req_stc load_req; struct load_rsp_stc load_rsp; u32 hsi_ver; enum _ecore_status_t rc; OSAL_MEM_ZERO(&load_req, sizeof(load_req)); load_req.drv_ver_0 = p_in_params->drv_ver_0; load_req.drv_ver_1 = p_in_params->drv_ver_1; load_req.fw_ver = p_in_params->fw_ver; SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role); SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO, p_in_params->timeout_val); SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FORCE, p_in_params->force_cmd); SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FLAGS0, p_in_params->avoid_eng_reset); hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
DRV_ID_MCP_HSI_VER_CURRENT : (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET); OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type; mb_params.p_data_src = &load_req; mb_params.data_src_size = sizeof(load_req); mb_params.p_data_dst = &load_rsp; mb_params.data_dst_size = sizeof(load_rsp); mb_params.flags = ECORE_MB_FLAG_CAN_SLEEP | ECORE_MB_FLAG_AVOID_BLOCK; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", mb_params.param, GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW), GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE), GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER), GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER)); if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1) DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n", load_req.drv_ver_0, load_req.drv_ver_1, load_req.fw_ver, load_req.misc0, GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE), GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO), GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE), GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0)); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "Failed to send load request, rc = %d\n", rc); return rc; } DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Load Response: resp 0x%08x\n", mb_params.mcp_resp); p_out_params->load_code = mb_params.mcp_resp; if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 && p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n", load_rsp.drv_ver_0, load_rsp.drv_ver_1, load_rsp.fw_ver, load_rsp.misc0, GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE), GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI), GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0)); p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0; p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1; p_out_params->exist_fw_ver = load_rsp.fw_ver; p_out_params->exist_drv_role = GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE); p_out_params->mfw_hsi_ver = GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI); p_out_params->drv_exists = GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) & LOAD_RSP_FLAGS0_DRV_EXISTS; } return ECORE_SUCCESS; } static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role, u8 *p_mfw_drv_role) { switch (drv_role) { case ECORE_DRV_ROLE_OS: *p_mfw_drv_role = DRV_ROLE_OS; break; case ECORE_DRV_ROLE_KDUMP: *p_mfw_drv_role = DRV_ROLE_KDUMP; break; } } enum ecore_load_req_force { ECORE_LOAD_REQ_FORCE_NONE, ECORE_LOAD_REQ_FORCE_PF, ECORE_LOAD_REQ_FORCE_ALL, }; static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd, u8 *p_mfw_force_cmd) { switch (force_cmd) { case ECORE_LOAD_REQ_FORCE_NONE: *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE; break; case ECORE_LOAD_REQ_FORCE_PF: *p_mfw_force_cmd = LOAD_REQ_FORCE_PF; break; case ECORE_LOAD_REQ_FORCE_ALL: *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL; break; } } enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_load_req_params *p_params) { struct ecore_load_req_out_params out_params; struct ecore_load_req_in_params in_params; u8 mfw_drv_role = 0, mfw_force_cmd; enum _ecore_status_t rc; #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { 
ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code); return ECORE_SUCCESS; } #endif OSAL_MEM_ZERO(&in_params, sizeof(in_params)); in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT; in_params.drv_ver_0 = ECORE_VERSION; in_params.drv_ver_1 = ecore_get_config_bitmap(); in_params.fw_ver = STORM_FW_VERSION; ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role); in_params.drv_role = mfw_drv_role; in_params.timeout_val = p_params->timeout_val; ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd); in_params.force_cmd = mfw_force_cmd; in_params.avoid_eng_reset = p_params->avoid_eng_reset; OSAL_MEM_ZERO(&out_params, sizeof(out_params)); rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); if (rc != ECORE_SUCCESS) return rc; /* First handle cases where another load request should/might be sent: * - MFW expects the old interface [HSI version = 1] * - MFW responds that a force load request is required */ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { DP_INFO(p_hwfn, "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n"); in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1; OSAL_MEM_ZERO(&out_params, sizeof(out_params)); rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); if (rc != ECORE_SUCCESS) return rc; } else if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) { if (ecore_mcp_can_force_load(in_params.drv_role, out_params.exist_drv_role, p_params->override_force_load)) { DP_INFO(p_hwfn, "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n", in_params.drv_role, in_params.fw_ver, in_params.drv_ver_0, in_params.drv_ver_1, out_params.exist_drv_role, out_params.exist_fw_ver, out_params.exist_drv_ver_0, out_params.exist_drv_ver_1); ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL, &mfw_force_cmd); in_params.force_cmd = mfw_force_cmd; OSAL_MEM_ZERO(&out_params, sizeof(out_params)); rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); if (rc != ECORE_SUCCESS) return rc; } else { DP_NOTICE(p_hwfn, false, "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n", in_params.drv_role, in_params.fw_ver, in_params.drv_ver_0, in_params.drv_ver_1, out_params.exist_drv_role, out_params.exist_fw_ver, out_params.exist_drv_ver_0, out_params.exist_drv_ver_1); ecore_mcp_cancel_load_req(p_hwfn, p_ptt); return ECORE_BUSY; } } /* Now handle the other types of responses. * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not * expected here after the additional revised load requests were sent. */ switch (out_params.load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: case FW_MSG_CODE_DRV_LOAD_PORT: case FW_MSG_CODE_DRV_LOAD_FUNCTION: if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 && out_params.drv_exists) { /* The role and fw/driver version match, but the PF is * already loaded and has not been unloaded gracefully. * This is unexpected since a quasi-FLR request was * previously sent as part of ecore_hw_prepare(). */ DP_NOTICE(p_hwfn, false, "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n"); return ECORE_INVAL; } break; default: DP_NOTICE(p_hwfn, false, "Unexpected refusal to load request [resp 0x%08x]. 
Aborting.\n", out_params.load_code); return ECORE_BUSY; } p_params->load_code = out_params.load_code; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 resp = 0, param = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, ¶m); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "Failed to send a LOAD_DONE command, rc = %d\n", rc); return rc; } if (resp == FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT) { DP_NOTICE(p_hwfn, false, "Received a LOAD_REFUSED_REJECT response from the mfw\n"); return ECORE_ABORTED; } /* Check if there is a DID mismatch between nvm-cfg/efuse */ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) DP_NOTICE(p_hwfn, false, "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_mb_params mb_params; u32 wol_param; switch (p_hwfn->p_dev->wol_config) { case ECORE_OV_WOL_DISABLED: wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED; break; case ECORE_OV_WOL_ENABLED: wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED; break; default: DP_NOTICE(p_hwfn, true, "Unknown WoL configuration %02x\n", p_hwfn->p_dev->wol_config); /* Fallthrough */ case ECORE_OV_WOL_DEFAULT: wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; } OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; mb_params.param = wol_param; mb_params.flags = ECORE_MB_FLAG_CAN_SLEEP | ECORE_MB_FLAG_AVOID_BLOCK; return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_mb_params mb_params; struct mcp_mac wol_mac; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE; /* Set the primary MAC if WoL is enabled */ if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) { u8 *p_mac = p_hwfn->p_dev->wol_mac; OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac)); wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1]; wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 | p_mac[4] << 8 | p_mac[5]; DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN), "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n", p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4], p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower); mb_params.p_data_src = &wol_mac; mb_params.data_src_size = sizeof(wol_mac); } return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PATH); u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr); u32 path_addr = SECTION_ADDR(mfw_path_offsize, ECORE_PATH_ID(p_hwfn)); u32 disabled_vfs[VF_MAX_STATIC / 32]; int i; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Reading Disabled VF information from [offset %08x], path_addr %08x\n", mfw_path_offsize, path_addr); for (i = 0; i < (VF_MAX_STATIC / 32); i++) { disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt, path_addr + OFFSETOF(struct public_path, mcp_vf_disabled) + sizeof(u32) * i); DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV), "FLR-ed VFs [%08x,...,%08x] - %08x\n", i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); } if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs)) OSAL_VF_FLR_UPDATE(p_hwfn); } enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *vfs_to_ack) { 
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_FUNC); u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr); u32 func_addr = SECTION_ADDR(mfw_func_offsize, MCP_PF_ID(p_hwfn)); struct ecore_mcp_mb_params mb_params; enum _ecore_status_t rc; int i; for (i = 0; i < (VF_MAX_STATIC / 32); i++) DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV), "Acking VFs [%08x,...,%08x] - %08x\n", i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE; mb_params.p_data_src = vfs_to_ack; mb_params.data_src_size = VF_MAX_STATIC / 8; rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "Failed to pass ACK for VF flr to MFW\n"); return ECORE_TIMEOUT; } /* TMP - clear the ACK bits; should be done by MFW */ for (i = 0; i < (VF_MAX_STATIC / 32); i++) ecore_wr(p_hwfn, p_ptt, func_addr + OFFSETOF(struct public_func, drv_ack_vf_disabled) + i * sizeof(u32), 0); return rc; } static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 transceiver_state; transceiver_state = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, transceiver_data)); DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP), "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", transceiver_state, (u32)(p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, transceiver_data))); transceiver_state = GET_MFW_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE); if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) DP_NOTICE(p_hwfn, false, "Transceiver is present.\n"); else DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n"); OSAL_TRANSCEIVER_UPDATE(p_hwfn); } static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_link_state *p_link) { u32 eee_status, val; p_link->eee_adv_caps = 0; p_link->eee_lp_adv_caps = 0; eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, eee_status)); p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET; if (val & EEE_1G_ADV) p_link->eee_adv_caps |= ECORE_EEE_1G_ADV; if (val & EEE_10G_ADV) p_link->eee_adv_caps |= ECORE_EEE_10G_ADV; val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; if (val & EEE_1G_ADV) p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV; if (val & EEE_10G_ADV) p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV; } static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct public_func *p_data, int pfid) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_FUNC); u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr); u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); u32 i, size; OSAL_MEM_ZERO(p_data, sizeof(*p_data)); size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize)); for (i = 0; i < size / sizeof(u32); i++) ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt, func_addr + (i << 2)); return size; } static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn, struct public_func *p_shmem_info) { struct ecore_mcp_function_info *p_info; p_info = &p_hwfn->mcp_info->func_info; /* TODO - bandwidth min/max should have valid values of 1-100, * as well as some indication that the feature is disabled. 
* Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS * limit and correct value to min `1' and max `100' if limit isn't in * range. */ p_info->bandwidth_min = (p_shmem_info->config & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_OFFSET; if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { DP_INFO(p_hwfn, "bandwidth minimum out of bounds [%02x]. Set to 1\n", p_info->bandwidth_min); p_info->bandwidth_min = 1; } p_info->bandwidth_max = (p_shmem_info->config & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_OFFSET; if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { DP_INFO(p_hwfn, "bandwidth maximum out of bounds [%02x]. Set to 100\n", p_info->bandwidth_max); p_info->bandwidth_max = 100; } } static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, bool b_reset) { struct ecore_mcp_link_state *p_link; u8 max_bw, min_bw; u32 status = 0; /* Prevent SW/attentions from doing this at the same time */ - OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock); + OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->link_lock); p_link = &p_hwfn->mcp_info->link_output; OSAL_MEMSET(p_link, 0, sizeof(*p_link)); if (!b_reset) { status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, link_status)); DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP), "Received link update [0x%08x] from mfw [Addr 0x%x]\n", status, (u32)(p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, link_status))); } else { DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link indications\n"); goto out; } if (p_hwfn->b_drv_link_init) { /* Link indication with modern MFW arrives as per-PF * indication. */ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { struct public_func shmem_info; ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); p_link->link_up = !!(shmem_info.status & FUNC_STATUS_VIRTUAL_LINK_UP); ecore_read_pf_bandwidth(p_hwfn, &shmem_info); } else { p_link->link_up = !!(status & LINK_STATUS_LINK_UP); } } else { p_link->link_up = false; } p_link->full_duplex = true; switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { case LINK_STATUS_SPEED_AND_DUPLEX_100G: p_link->speed = 100000; break; case LINK_STATUS_SPEED_AND_DUPLEX_50G: p_link->speed = 50000; break; case LINK_STATUS_SPEED_AND_DUPLEX_40G: p_link->speed = 40000; break; case LINK_STATUS_SPEED_AND_DUPLEX_25G: p_link->speed = 25000; break; case LINK_STATUS_SPEED_AND_DUPLEX_20G: p_link->speed = 20000; break; case LINK_STATUS_SPEED_AND_DUPLEX_10G: p_link->speed = 10000; break; case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: p_link->full_duplex = false; /* Fall-through */ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: p_link->speed = 1000; break; default: p_link->speed = 0; p_link->link_up = 0; } /* We never store total line speed as p_link->speed is * again changes according to bandwidth allocation. 
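 */

/*
 * [Editor's sketch] ecore_read_pf_bandwidth() above forces the min/max
 * bandwidth read from shmem into the valid 1-100 range (min falls back to 1,
 * max falls back to 100). A tiny stand-alone illustration of that clamping
 * rule follows; example_clamp_pf_bw() is a hypothetical helper, not driver
 * code.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
example_clamp_pf_bw(uint32_t bw, uint32_t fallback)
{
	/* out-of-range values fall back to the documented default */
	return ((bw < 1 || bw > 100) ? fallback : bw);
}

int
main(void)
{
	printf("%u %u %u\n",
	    example_clamp_pf_bw(0, 1),		/* -> 1   (too low, min default)  */
	    example_clamp_pf_bw(50, 1),		/* -> 50  (already in range)      */
	    example_clamp_pf_bw(250, 100));	/* -> 100 (too high, max default) */
	return (0);
}

/* (original listing continues below)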
*/ if (p_link->link_up && p_link->speed) p_link->line_speed = p_link->speed; else p_link->line_speed = 0; max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; /* Max bandwidth configuration */ __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); /* Min bandwidth configuration */ __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt, p_link->min_pf_rate); p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); p_link->parallel_detection = !!(status & LINK_STATUS_PARALLEL_DETECTION_USED); p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? ECORE_LINK_PARTNER_SPEED_1G_FD : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? ECORE_LINK_PARTNER_SPEED_1G_HD : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? ECORE_LINK_PARTNER_SPEED_10G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? ECORE_LINK_PARTNER_SPEED_20G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? ECORE_LINK_PARTNER_SPEED_25G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? ECORE_LINK_PARTNER_SPEED_40G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? ECORE_LINK_PARTNER_SPEED_50G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? ECORE_LINK_PARTNER_SPEED_100G : 0; p_link->partner_tx_flow_ctrl_en = !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); p_link->partner_rx_flow_ctrl_en = !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE; break; case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE; break; case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE; break; default: p_link->partner_adv_pause = 0; } p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link); OSAL_LINK_UPDATE(p_hwfn, p_ptt); out: - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->link_lock); } enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, bool b_up) { struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input; struct ecore_mcp_mb_params mb_params; struct eth_phy_cfg phy_cfg; enum _ecore_status_t rc = ECORE_SUCCESS; u32 cmd; #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) return ECORE_SUCCESS; #endif /* Set the shmem configuration according to params */ OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg)); cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; if (!params->speed.autoneg) phy_cfg.speed = params->speed.forced_speed; phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; phy_cfg.pause |= (params->pause.forced_tx) ? 
ETH_PAUSE_TX : 0; phy_cfg.adv_speed = params->speed.advertised_speeds; phy_cfg.loopback_mode = params->loopback_mode; /* There are MFWs that share this capability regardless of whether * this is feasible or not. And given that at the very least adv_caps * would be set internally by ecore, we want to make sure LFA would * still work. */ if ((p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) { phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; if (params->eee.tx_lpi_enable) phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; if (params->eee.adv_caps & ECORE_EEE_1G_ADV) phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; if (params->eee.adv_caps & ECORE_EEE_10G_ADV) phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << EEE_TX_TIMER_USEC_OFFSET) & EEE_TX_TIMER_USEC_MASK; } p_hwfn->b_drv_link_init = b_up; if (b_up) DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n", phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, phy_cfg.loopback_mode); else DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n"); OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.p_data_src = &phy_cfg; mb_params.data_src_size = sizeof(phy_cfg); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); /* if mcp fails to respond we must abort */ if (rc != ECORE_SUCCESS) { DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } /* Mimic link-change attention, done for several reasons: * - On reset, there's no guarantee MFW would trigger * an attention. * - On initialization, older MFWs might not indicate link change * during LFA, so we'll never get an UP indication. */ ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up); return ECORE_SUCCESS; } u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; /* TODO - Add support for VFs */ if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PATH); path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr); path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn)); proc_kill_cnt = ecore_rd(p_hwfn, p_ptt, path_addr + OFFSETOF(struct public_path, process_kill)) & PROCESS_KILL_COUNTER_MASK; return proc_kill_cnt; } static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_dev *p_dev = p_hwfn->p_dev; u32 proc_kill_cnt; /* Prevent possible attentions/interrupts during the recovery handling * and till its load phase, during which they will be re-enabled. */ ecore_int_igu_disable_int(p_hwfn, p_ptt); DP_NOTICE(p_hwfn, false, "Received a process kill indication\n"); /* The following operations should be done once, and thus in CMT mode * are carried out by only the first HW function. 
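 */

/*
 * [Editor's sketch] ecore_mcp_set_link() above builds phy_cfg.pause by OR-ing
 * one flag per requested pause property (autoneg, forced RX, forced TX)
 * before the structure is handed to the MFW. The stand-alone snippet below
 * illustrates that packing; the EXAMPLE_PAUSE_* values are assumed bit
 * positions chosen for the demo, not the HSI-defined ETH_PAUSE_* constants.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAUSE_AUTONEG	0x1	/* assumed values, illustration only */
#define EXAMPLE_PAUSE_RX	0x2
#define EXAMPLE_PAUSE_TX	0x4

static uint32_t
example_pack_pause(bool autoneg, bool forced_rx, bool forced_tx)
{
	uint32_t pause = 0;

	pause |= autoneg ? EXAMPLE_PAUSE_AUTONEG : 0;
	pause |= forced_rx ? EXAMPLE_PAUSE_RX : 0;
	pause |= forced_tx ? EXAMPLE_PAUSE_TX : 0;
	return (pause);
}

int
main(void)
{
	/* forced RX + forced TX, no autoneg -> 0x6 with the assumed values */
	printf("0x%x\n", example_pack_pause(false, true, true));
	return (0);
}

/* (original listing continues below)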
*/ if (p_hwfn != ECORE_LEADING_HWFN(p_dev)) return; if (p_dev->recov_in_prog) { DP_NOTICE(p_hwfn, false, "Ignoring the indication since a recovery process is already in progress\n"); return; } p_dev->recov_in_prog = true; proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt); DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt); OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn); } static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum MFW_DRV_MSG_TYPE type) { enum ecore_mcp_protocol_type stats_type; union ecore_mcp_protocol_stats stats; struct ecore_mcp_mb_params mb_params; u32 hsi_param; enum _ecore_status_t rc; switch (type) { case MFW_DRV_MSG_GET_LAN_STATS: stats_type = ECORE_MCP_LAN_STATS; hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; break; case MFW_DRV_MSG_GET_FCOE_STATS: stats_type = ECORE_MCP_FCOE_STATS; hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE; break; case MFW_DRV_MSG_GET_ISCSI_STATS: stats_type = ECORE_MCP_ISCSI_STATS; hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI; break; case MFW_DRV_MSG_GET_RDMA_STATS: stats_type = ECORE_MCP_RDMA_STATS; hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA; break; default: DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Invalid protocol type %d\n", type); return; } OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats); OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_GET_STATS; mb_params.param = hsi_param; mb_params.p_data_src = &stats; mb_params.data_src_size = sizeof(stats); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc); } static void ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_function_info *p_info; struct public_func shmem_info; u32 resp = 0, param = 0; - OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock); + OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->link_lock); ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); ecore_read_pf_bandwidth(p_hwfn, &shmem_info); p_info = &p_hwfn->mcp_info->func_info; ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min); ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max); - OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock); + OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->link_lock); /* Acknowledge the MFW */ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, ¶m); } static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct public_func shmem_info; u32 resp = 0, param = 0; ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK; p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) && (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) { ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); ecore_sp_pf_update_stag(p_hwfn); /* Configure doorbell to add external vlan to EDPM packets */ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, p_hwfn->hw_info.ovlan); } DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN); /* Acknowledge the MFW */ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, &resp, ¶m); } static void ecore_mcp_handle_fan_failure(struct ecore_hwfn 
*p_hwfn) { /* A single notification should be sent to upper driver in CMT mode */ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) return; DP_NOTICE(p_hwfn, false, "Fan failure was detected on the network interface card and it's going to be shut down.\n"); ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL); } struct ecore_mdump_cmd_params { u32 cmd; void *p_data_src; u8 data_src_size; void *p_data_dst; u8 data_dst_size; u32 mcp_resp; }; static enum _ecore_status_t ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mdump_cmd_params *p_mdump_cmd_params) { struct ecore_mcp_mb_params mb_params; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD; mb_params.param = p_mdump_cmd_params->cmd; mb_params.p_data_src = p_mdump_cmd_params->p_data_src; mb_params.data_src_size = p_mdump_cmd_params->data_src_size; mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst; mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size; rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp; if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) { DP_INFO(p_hwfn, "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n", p_mdump_cmd_params->cmd); rc = ECORE_NOTIMPL; } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The mdump command is not supported by the MFW\n"); rc = ECORE_NOTIMPL; } return rc; } static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mdump_cmd_params mdump_cmd_params; OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK; return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); } enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 epoch) { struct ecore_mdump_cmd_params mdump_cmd_params; OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES; mdump_cmd_params.p_data_src = &epoch; mdump_cmd_params.data_src_size = sizeof(epoch); return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); } enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mdump_cmd_params mdump_cmd_params; OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER; return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); } static enum _ecore_status_t ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct mdump_config_stc *p_mdump_config) { struct ecore_mdump_cmd_params mdump_cmd_params; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG; mdump_cmd_params.p_data_dst = p_mdump_config; mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config); rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); if (rc != ECORE_SUCCESS) return rc; if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { DP_INFO(p_hwfn, "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n", mdump_cmd_params.mcp_resp); rc = ECORE_UNKNOWN_ERROR; } return rc; } enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mdump_info *p_mdump_info) { u32 addr, global_offsize, global_addr; struct mdump_config_stc 
mdump_config; enum _ecore_status_t rc; OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info)); addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_GLOBAL); global_offsize = ecore_rd(p_hwfn, p_ptt, addr); global_addr = SECTION_ADDR(global_offsize, 0); p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt, global_addr + OFFSETOF(struct public_global, mdump_reason)); if (p_mdump_info->reason) { rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config); if (rc != ECORE_SUCCESS) return rc; p_mdump_info->version = mdump_config.version; p_mdump_info->config = mdump_config.config; p_mdump_info->epoch = mdump_config.epoc; p_mdump_info->num_of_logs = mdump_config.num_of_logs; p_mdump_info->valid_logs = mdump_config.valid_logs; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n", p_mdump_info->reason, p_mdump_info->version, p_mdump_info->config, p_mdump_info->epoch, p_mdump_info->num_of_logs, p_mdump_info->valid_logs); } else { DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "MFW mdump info: reason %d\n", p_mdump_info->reason); } return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mdump_cmd_params mdump_cmd_params; OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS; return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); } enum _ecore_status_t ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mdump_retain_data *p_mdump_retain) { struct ecore_mdump_cmd_params mdump_cmd_params; struct mdump_retain_data_stc mfw_mdump_retain; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; mdump_cmd_params.p_data_dst = &mfw_mdump_retain; mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain); rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); if (rc != ECORE_SUCCESS) return rc; if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { DP_INFO(p_hwfn, "Failed to get the mdump retained data [mcp_resp 0x%x]\n", mdump_cmd_params.mcp_resp); return ECORE_UNKNOWN_ERROR; } p_mdump_retain->valid = mfw_mdump_retain.valid; p_mdump_retain->epoch = mfw_mdump_retain.epoch; p_mdump_retain->pf = mfw_mdump_retain.pf; p_mdump_retain->status = mfw_mdump_retain.status; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mdump_cmd_params mdump_cmd_params; OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN; return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); } static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mdump_retain_data mdump_retain; enum _ecore_status_t rc; /* In CMT mode - no need for more than a single acknowledgement to the * MFW, and no more than a single notification to the upper driver. 
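 */

/*
 * [Editor's sketch] ecore_mcp_mdump_get_retain() above copies the MFW's
 * retained crash-dump record (valid, epoch, pf, status) into the caller's
 * ecore_mdump_retain_data. The fragment below is a hedged usage sketch,
 * mirroring what ecore_mcp_handle_critical_error() does just after this
 * point; example_report_mdump_retain() is hypothetical and assumes the
 * surrounding ecore declarations are in scope.
 */
static void
example_report_mdump_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_retain_data retain;

	if (ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &retain) != ECORE_SUCCESS)
		return;

	if (retain.valid)
		DP_NOTICE(p_hwfn, false,
			  "mdump retain: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
			  retain.epoch, retain.pf, retain.status);
}

/* (original listing continues below)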
*/ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) return; rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); if (rc == ECORE_SUCCESS && mdump_retain.valid) { DP_NOTICE(p_hwfn, false, "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", mdump_retain.epoch, mdump_retain.pf, mdump_retain.status); } else { DP_NOTICE(p_hwfn, false, "The MFW notified that a critical error occurred in the device\n"); } if (p_hwfn->p_dev->allow_mdump) { DP_NOTICE(p_hwfn, false, "Not acknowledging the notification to allow the MFW crash dump\n"); return; } DP_NOTICE(p_hwfn, false, "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); ecore_mcp_mdump_ack(p_hwfn, p_ptt); ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); } void ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct public_func shmem_info; u32 port_cfg, val; if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) return; OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, oem_cfg_port)); val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE); if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n", val); val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE); if (val == OEM_CFG_SCHED_TYPE_ETS) p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS; else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW; else { p_hwfn->ufp_info.mode = ECORE_UFP_MODE_UNKNOWN; DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n", val); } ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC); p_hwfn->ufp_info.tc = (u8)val; val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_HOST_PRI_CTRL); if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC; else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS; else { p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_UNKNOWN; DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n", val); } DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "UFP shmem config: mode = %d tc = %d pri_type = %d\n", p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type); } static enum _ecore_status_t ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { ecore_mcp_read_ufp_config(p_hwfn, p_ptt); if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) { p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; ecore_qm_reconf(p_hwfn, p_ptt); } else if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_ETS) { /* Merge UFP TC with the dcbx TC data */ ecore_dcbx_mib_update_event(p_hwfn, p_ptt, ECORE_DCBX_OPERATIONAL_MIB); } else { DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n"); return ECORE_INVAL; } /* update storm FW with negotiation results */ ecore_sp_pf_update_ufp(p_hwfn); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_info *info = p_hwfn->mcp_info; enum _ecore_status_t rc = ECORE_SUCCESS; bool found = false; u16 i; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n"); /* Read Messages from MFW */ ecore_mcp_read_mb(p_hwfn, p_ptt); /* Compare current messages to old ones */ for (i = 0; i < 
info->mfw_mb_length; i++) { if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) continue; found = true; DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); switch (i) { case MFW_DRV_MSG_LINK_CHANGE: ecore_mcp_handle_link_change(p_hwfn, p_ptt, false); break; case MFW_DRV_MSG_VF_DISABLED: ecore_mcp_handle_vf_flr(p_hwfn, p_ptt); break; case MFW_DRV_MSG_LLDP_DATA_UPDATED: ecore_dcbx_mib_update_event(p_hwfn, p_ptt, ECORE_DCBX_REMOTE_LLDP_MIB); break; case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: ecore_dcbx_mib_update_event(p_hwfn, p_ptt, ECORE_DCBX_REMOTE_MIB); break; case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: ecore_dcbx_mib_update_event(p_hwfn, p_ptt, ECORE_DCBX_OPERATIONAL_MIB); /* clear the user-config cache */ OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0, sizeof(struct ecore_dcbx_set)); break; case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED: ecore_lldp_mib_update_event(p_hwfn, p_ptt); break; case MFW_DRV_MSG_OEM_CFG_UPDATE: ecore_mcp_handle_ufp_event(p_hwfn, p_ptt); break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; case MFW_DRV_MSG_ERROR_RECOVERY: ecore_mcp_handle_process_kill(p_hwfn, p_ptt); break; case MFW_DRV_MSG_GET_LAN_STATS: case MFW_DRV_MSG_GET_FCOE_STATS: case MFW_DRV_MSG_GET_ISCSI_STATS: case MFW_DRV_MSG_GET_RDMA_STATS: ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i); break; case MFW_DRV_MSG_BW_UPDATE: ecore_mcp_update_bw(p_hwfn, p_ptt); break; case MFW_DRV_MSG_S_TAG_UPDATE: ecore_mcp_update_stag(p_hwfn, p_ptt); break; case MFW_DRV_MSG_FAILURE_DETECTED: ecore_mcp_handle_fan_failure(p_hwfn); break; case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: ecore_mcp_handle_critical_error(p_hwfn, p_ptt); break; case MFW_DRV_MSG_GET_TLV_REQ: OSAL_MFW_TLV_REQ(p_hwfn); break; default: DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); rc = ECORE_INVAL; } } /* ACK everything */ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]); /* MFW expect answer in BE, so we force write in that format */ ecore_wr(p_hwfn, p_ptt, info->mfw_mb_addr + sizeof(u32) + MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * sizeof(u32) + i * sizeof(u32), val); } if (!found) { DP_INFO(p_hwfn, "Received an MFW message indication but no new message!\n"); rc = ECORE_INVAL; } /* Copy the new mfw messages into the shadow */ OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); return rc; } enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id) { u32 global_offsize; #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n"); return ECORE_SUCCESS; } #endif if (IS_VF(p_hwfn->p_dev)) { if (p_hwfn->vf_iov_info) { struct pfvf_acquire_resp_tlv *p_resp; p_resp = &p_hwfn->vf_iov_info->acquire_resp; *p_mfw_ver = p_resp->pfdev_info.mfw_ver; return ECORE_SUCCESS; } else { DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "VF requested MFW version prior to ACQUIRE\n"); return ECORE_INVAL; } } global_offsize = ecore_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_GLOBAL)); *p_mfw_ver = ecore_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + OFFSETOF(struct public_global, mfw_ver)); if (p_running_bundle_id != OSAL_NULL) { *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + OFFSETOF(struct public_global, running_bundle_id)); } return 
ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_mbi_ver) { u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr; #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n"); return ECORE_SUCCESS; } #endif if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; /* Read the address of the nvm_cfg */ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); if (!nvm_cfg_addr) { DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); return ECORE_INVAL; } /* Read the offset of nvm_cfg1 */ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob, mbi_version); *p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) & (NVM_CFG1_GLOB_MBI_VERSION_0_MASK | NVM_CFG1_GLOB_MBI_VERSION_1_MASK | NVM_CFG1_GLOB_MBI_VERSION_2_MASK); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_media_type) { /* TODO - Add support for VFs */ if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; if (!ecore_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); return ECORE_BUSY; } if (!p_ptt) { *p_media_type = MEDIA_UNSPECIFIED; return ECORE_INVAL; } else { *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, media_type)); } return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_tranceiver_type) { /* TODO - Add support for VFs */ if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; if (!ecore_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); return ECORE_BUSY; } if (!p_ptt) { *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE; return ECORE_INVAL; } else { *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, transceiver_data)); } return 0; } static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) { if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) { return 1; } return 0; } enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_speed_mask) { u32 transceiver_data, transceiver_type, transceiver_state; ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data); transceiver_state = GET_MFW_FIELD(transceiver_data, ETH_TRANSCEIVER_STATE); transceiver_type = GET_MFW_FIELD(transceiver_data, ETH_TRANSCEIVER_TYPE); if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) { return ECORE_INVAL; } switch (transceiver_type) { case ETH_TRANSCEIVER_TYPE_1G_LX: case ETH_TRANSCEIVER_TYPE_1G_SX: case ETH_TRANSCEIVER_TYPE_1G_PCC: case ETH_TRANSCEIVER_TYPE_1G_ACC: case ETH_TRANSCEIVER_TYPE_1000BASET: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; case ETH_TRANSCEIVER_TYPE_10G_SR: case ETH_TRANSCEIVER_TYPE_10G_LR: case ETH_TRANSCEIVER_TYPE_10G_LRM: case ETH_TRANSCEIVER_TYPE_10G_ER: case ETH_TRANSCEIVER_TYPE_10G_PCC: case ETH_TRANSCEIVER_TYPE_10G_ACC: case ETH_TRANSCEIVER_TYPE_4x10G: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; break; case ETH_TRANSCEIVER_TYPE_40G_LR4: case ETH_TRANSCEIVER_TYPE_40G_SR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: case 
ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; break; case ETH_TRANSCEIVER_TYPE_100G_AOC: case ETH_TRANSCEIVER_TYPE_100G_SR4: case ETH_TRANSCEIVER_TYPE_100G_LR4: case ETH_TRANSCEIVER_TYPE_100G_ER4: case ETH_TRANSCEIVER_TYPE_100G_ACC: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; break; case ETH_TRANSCEIVER_TYPE_25G_SR: case ETH_TRANSCEIVER_TYPE_25G_LR: case ETH_TRANSCEIVER_TYPE_25G_AOC: case ETH_TRANSCEIVER_TYPE_25G_ACC_S: case ETH_TRANSCEIVER_TYPE_25G_ACC_M: case ETH_TRANSCEIVER_TYPE_25G_ACC_L: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; break; case ETH_TRANSCEIVER_TYPE_25G_CA_N: case ETH_TRANSCEIVER_TYPE_25G_CA_S: case ETH_TRANSCEIVER_TYPE_25G_CA_L: case ETH_TRANSCEIVER_TYPE_4x25G_CR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; case ETH_TRANSCEIVER_TYPE_40G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; case ETH_TRANSCEIVER_TYPE_100G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; break; case ETH_TRANSCEIVER_TYPE_XLPPI: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; break; case ETH_TRANSCEIVER_TYPE_10G_BASET: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; default: DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n", transceiver_type); *p_speed_mask = 0xff; break; } return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_board_config) { u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; /* TODO - Add support for VFs */ if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; if (!ecore_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); return ECORE_BUSY; } if (!p_ptt) { *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; return ECORE_INVAL; } else { nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); *p_board_config = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, board_cfg)); } return ECORE_SUCCESS; } /* Old MFW has a global configuration for all PFs regarding RDMA support */ static void ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn, enum ecore_pci_personality *p_proto) { /* There wasn't ever a legacy MFW that published iwarp. 
* So at this point, this is either plain l2 or RoCE. */ if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities)) *p_proto = ECORE_PCI_ETH_ROCE; else *p_proto = ECORE_PCI_ETH; DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "According to Legacy capabilities, L2 personality is %08x\n", (u32) *p_proto); } static enum _ecore_status_t ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_pci_personality *p_proto) { u32 resp = 0, param = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param); if (rc != ECORE_SUCCESS) return rc; if (resp != FW_MSG_CODE_OK) { DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "MFW lacks support for command; Returns %08x\n", resp); return ECORE_INVAL; } switch (param) { case FW_MB_PARAM_GET_PF_RDMA_NONE: *p_proto = ECORE_PCI_ETH; break; case FW_MB_PARAM_GET_PF_RDMA_ROCE: *p_proto = ECORE_PCI_ETH_ROCE; break; case FW_MB_PARAM_GET_PF_RDMA_IWARP: *p_proto = ECORE_PCI_ETH_IWARP; break; case FW_MB_PARAM_GET_PF_RDMA_BOTH: *p_proto = ECORE_PCI_ETH_RDMA; break; default: DP_NOTICE(p_hwfn, true, "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n", param); return ECORE_INVAL; } DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", (u32) *p_proto, resp, param); return ECORE_SUCCESS; } static enum _ecore_status_t ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn, struct public_func *p_info, struct ecore_ptt *p_ptt, enum ecore_pci_personality *p_proto) { enum _ecore_status_t rc = ECORE_SUCCESS; switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) != ECORE_SUCCESS) ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); break; case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = ECORE_PCI_ISCSI; break; case FUNC_MF_CFG_PROTOCOL_FCOE: *p_proto = ECORE_PCI_FCOE; break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n"); /* Fallthrough */ default: rc = ECORE_INVAL; } return rc; } enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_mcp_function_info *info; struct public_func shmem_info; ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); info = &p_hwfn->mcp_info->func_info; info->pause_on_host = (shmem_info.config & FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, &info->protocol)) { DP_ERR(p_hwfn, "Unknown personality %08x\n", (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); return ECORE_INVAL; } ecore_read_pf_bandwidth(p_hwfn, &shmem_info); if (shmem_info.mac_upper || shmem_info.mac_lower) { info->mac[0] = (u8)(shmem_info.mac_upper >> 8); info->mac[1] = (u8)(shmem_info.mac_upper); info->mac[2] = (u8)(shmem_info.mac_lower >> 24); info->mac[3] = (u8)(shmem_info.mac_lower >> 16); info->mac[4] = (u8)(shmem_info.mac_lower >> 8); info->mac[5] = (u8)(shmem_info.mac_lower); /* Store primary MAC for later possible WoL */ OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN); } else { /* TODO - are there protocols for which there's no MAC? */ DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n"); } /* TODO - are these calculations true for BE machine?
*/ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower | (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32); info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower | (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32); info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); info->mtu = (u16)shmem_info.mtu_size; p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE; p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT; if (ecore_mcp_is_init(p_hwfn)) { u32 resp = 0, param = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OS_WOL, 0, &resp, &param); if (rc != ECORE_SUCCESS) return rc; if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED) p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME; } DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP), "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n", info->pause_on_host, info->protocol, info->bandwidth_min, info->bandwidth_max, info->mac[0], info->mac[1], info->mac[2], info->mac[3], info->mac[4], info->mac[5], (unsigned long long)info->wwn_port, (unsigned long long)info->wwn_node, info->ovlan, (u8)p_hwfn->hw_info.b_wol_support); return ECORE_SUCCESS; } struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn) { if (!p_hwfn || !p_hwfn->mcp_info) return OSAL_NULL; return &p_hwfn->mcp_info->link_input; } struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn) { if (!p_hwfn || !p_hwfn->mcp_info) return OSAL_NULL; #ifndef ASIC_ONLY if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n"); p_hwfn->mcp_info->link_output.link_up = true; } #endif return &p_hwfn->mcp_info->link_output; } struct ecore_mcp_link_capabilities *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn) { if (!p_hwfn || !p_hwfn->mcp_info) return OSAL_NULL; return &p_hwfn->mcp_info->link_capabilities; } enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 resp = 0, param = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param); /* Wait for the drain to complete before returning */ OSAL_MSLEEP(1020); return rc; } #ifndef LINUX_REMOVE const struct ecore_mcp_function_info *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn) { if (!p_hwfn || !p_hwfn->mcp_info) return OSAL_NULL; return &p_hwfn->mcp_info->func_info; } int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 personalities) { enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT; struct public_func shmem_info; int i, count = 0, num_pfs; num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev); for (i = 0; i < num_pfs; i++) { ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID_BY_REL(p_hwfn, i)); if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) continue; if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, &protocol) != ECORE_SUCCESS) continue; if ((1 << ((u32)protocol)) & personalities) count++; } return count; } #endif enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_flash_size) { u32 flash_size; #ifndef ASIC_ONLY if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n"); return ECORE_INVAL; } #endif if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET)); *p_flash_size = flash_size; return ECORE_SUCCESS; } enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_dev *p_dev = p_hwfn->p_dev; if (p_dev->recov_in_prog) { DP_NOTICE(p_hwfn, false, "Avoid triggering a recovery since such a process is already in progress\n"); return ECORE_AGAIN; } DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n"); ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); return ECORE_SUCCESS; } #define ECORE_RECOVERY_PROLOG_SLEEP_MS 100 enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; enum _ecore_status_t rc; /* Allow ongoing PCIe transactions to complete */ OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS); /* Clear the PF's internal FID_enable in the PXP */ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); if (rc != ECORE_SUCCESS) DP_NOTICE(p_hwfn, false, "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n", rc); return rc; } static enum _ecore_status_t ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 vf_id, u8 num) { u32 resp = 0, param = 0, rc_param = 0; enum _ecore_status_t rc; /* Only Leader can configure MSIX, and need to take CMT into account */ if (!IS_LEAD_HWFN(p_hwfn)) return ECORE_SUCCESS; num *= p_hwfn->p_dev->num_hwfns; param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) & DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) & DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, &resp, &rc_param); if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n", vf_id); rc = ECORE_INVAL; } else { DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", num, vf_id); } return rc; } static enum _ecore_status_t ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 num) { u32 resp = 0, param = num, rc_param = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, param, &resp, &rc_param); if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n"); rc = ECORE_INVAL; } else { DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Requested 0x%02x MSI-x interrupts for VFs\n", num); } return rc; } enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 vf_id, u8 num) { if (ECORE_IS_BB(p_hwfn->p_dev)) return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); else return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); } enum _ecore_status_t ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_drv_version *p_ver) { struct ecore_mcp_mb_params mb_params; struct drv_version_stc drv_version; u32 num_words, i; void *p_name; OSAL_BE32 val; enum _ecore_status_t rc; #ifndef ASIC_ONLY if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) return ECORE_SUCCESS; #endif OSAL_MEM_ZERO(&drv_version, sizeof(drv_version)); drv_version.version = p_ver->version; num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4; for (i = 0; i < num_words; i++) { /* The driver name is expected to be in a big-endian format */ p_name = &p_ver->name[i * sizeof(u32)]; val = OSAL_CPU_TO_BE32(*(u32 *)p_name); *(u32 *)&drv_version.name[i * 
sizeof(u32)] = val; } OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_SET_VERSION; mb_params.p_data_src = &drv_version; mb_params.data_src_size = sizeof(drv_version); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } /* A maximal 100 msec waiting time for the MCP to halt */ #define ECORE_MCP_HALT_SLEEP_MS 10 #define ECORE_MCP_HALT_MAX_RETRIES 10 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 resp = 0, param = 0, cpu_state, cnt = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, &param); if (rc != ECORE_SUCCESS) { DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } do { OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS); cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) break; } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES); if (cnt == ECORE_MCP_HALT_MAX_RETRIES) { DP_NOTICE(p_hwfn, false, "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); return ECORE_BUSY; } ecore_mcp_cmd_set_blocking(p_hwfn, true); return ECORE_SUCCESS; } #define ECORE_MCP_RESUME_SLEEP_MS 10 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 cpu_mode, cpu_state; ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS); cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { DP_NOTICE(p_hwfn, false, "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", cpu_mode, cpu_state); return ECORE_BUSY; } ecore_mcp_cmd_set_blocking(p_hwfn, false); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_ov_client client) { u32 resp = 0, param = 0; u32 drv_mb_param; enum _ecore_status_t rc; switch (client) { case ECORE_OV_CLIENT_DRV: drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; break; case ECORE_OV_CLIENT_USER: drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; break; case ECORE_OV_CLIENT_VENDOR_SPEC: drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; break; default: DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client); return ECORE_INVAL; } rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, drv_mb_param, &resp, &param); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } enum _ecore_status_t ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_ov_driver_state drv_state) { u32 resp = 0, param = 0; u32 drv_mb_param; enum _ecore_status_t rc; switch (drv_state) { case ECORE_OV_DRIVER_STATE_NOT_LOADED: drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; break; case ECORE_OV_DRIVER_STATE_DISABLED: drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; break; case ECORE_OV_DRIVER_STATE_ACTIVE: drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; break; default: DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state); return ECORE_INVAL; } rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, drv_mb_param, &resp, &param); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "Failed to send driver state\n"); return
rc; } enum _ecore_status_t ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_fc_npiv_tbl *p_table) { struct dci_fc_npiv_tbl *p_npiv_table; u8 *p_buf = OSAL_NULL; u32 addr, size, i; enum _ecore_status_t rc = ECORE_SUCCESS; p_table->num_wwpn = 0; p_table->num_wwnn = 0; addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, fc_npiv_nvram_tbl_addr)); if (addr == NPIV_TBL_INVALID_ADDR) { DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n"); return rc; } size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + OFFSETOF(struct public_port, fc_npiv_nvram_tbl_size)); if (!size) { DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n"); return rc; } p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size); if (!p_buf) { DP_ERR(p_hwfn, "Buffer allocation failed\n"); return ECORE_NOMEM; } rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size); if (rc != ECORE_SUCCESS) { OSAL_VFREE(p_hwfn->p_dev, p_buf); return rc; } p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf; p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv; p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv; for (i = 0; i < p_table->num_wwpn; i++) { OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn, ECORE_WWN_SIZE); OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn, ECORE_WWN_SIZE); } OSAL_VFREE(p_hwfn->p_dev, p_buf); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 mtu) { u32 resp = 0, param = 0; u32 drv_mb_param; enum _ecore_status_t rc; drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_OFFSET; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, drv_mb_param, &resp, &param); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); return rc; } enum _ecore_status_t ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 *mac) { struct ecore_mcp_mb_params mb_params; u32 mfw_mac[2]; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_SET_VMAC; mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC << DRV_MSG_CODE_VMAC_TYPE_OFFSET; mb_params.param |= MCP_PF_ID(p_hwfn); /* MCP is BE, and on LE platforms PCI would swap access to SHMEM * in 32-bit granularity. * So the MAC has to be set in native order [and not byte order], * otherwise it would be read incorrectly by MFW after swap.
*/ mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3]; mfw_mac[1] = mac[4] << 24 | mac[5] << 16; mb_params.p_data_src = (u8 *)mfw_mac; mb_params.data_src_size = 8; rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); /* Store primary MAC for later possible WoL */ OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN); return rc; } enum _ecore_status_t ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_ov_wol wol) { u32 resp = 0, param = 0; u32 drv_mb_param; enum _ecore_status_t rc; if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) { DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Can't change WoL configuration when WoL isn't supported\n"); return ECORE_INVAL; } switch (wol) { case ECORE_OV_WOL_DEFAULT: drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT; break; case ECORE_OV_WOL_DISABLED: drv_mb_param = DRV_MB_PARAM_WOL_DISABLED; break; case ECORE_OV_WOL_ENABLED: drv_mb_param = DRV_MB_PARAM_WOL_ENABLED; break; default: DP_ERR(p_hwfn, "Invalid wol state %d\n", wol); return ECORE_INVAL; } rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL, drv_mb_param, &resp, &param); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc); /* Store the WoL update for a future unload */ p_hwfn->p_dev->wol_config = (u8)wol; return rc; } enum _ecore_status_t ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_ov_eswitch eswitch) { u32 resp = 0, param = 0; u32 drv_mb_param; enum _ecore_status_t rc; switch (eswitch) { case ECORE_OV_ESWITCH_NONE: drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; break; case ECORE_OV_ESWITCH_VEB: drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; break; case ECORE_OV_ESWITCH_VEPA: drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; break; default: DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); return ECORE_INVAL; } rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, drv_mb_param, &resp, &param); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc); return rc; } enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_led_mode mode) { u32 resp = 0, param = 0, drv_mb_param; enum _ecore_status_t rc; switch (mode) { case ECORE_LED_MODE_ON: drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; break; case ECORE_LED_MODE_OFF: drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; break; case ECORE_LED_MODE_RESTORE: drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; break; default: DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode); return ECORE_INVAL; } rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, drv_mb_param, &resp, &param); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 mask_parities) { u32 resp = 0, param = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, mask_parities, &resp, &param); if (rc != ECORE_SUCCESS) { DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n"); } else if (resp != FW_MSG_CODE_OK) { DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request.
Old MFW?\n"); rc = ECORE_INVAL; } return rc; } enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, u8 *p_buf, u32 len) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); u32 bytes_left, offset, bytes_to_copy, buf_size; u32 nvm_offset, resp = 0, param; struct ecore_ptt *p_ptt; enum _ecore_status_t rc = ECORE_SUCCESS; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; bytes_left = len; offset = 0; while (bytes_left > 0) { bytes_to_copy = OSAL_MIN_T(u32, bytes_left, MCP_DRV_NVM_BUF_LEN); nvm_offset = (addr + offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET); rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, nvm_offset, &resp, &param, &buf_size, (u32 *)(p_buf + offset)); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_dev, false, "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n", rc); resp = FW_MSG_CODE_ERROR; break; } if (resp != FW_MSG_CODE_NVM_OK) { DP_NOTICE(p_dev, false, "nvm read failed, resp = 0x%08x\n", resp); rc = ECORE_UNKNOWN_ERROR; break; } /* This can be a lengthy process, and it's possible scheduler * isn't preemptable. Sleep a bit to prevent CPU hogging. */ if (bytes_left % 0x1000 < (bytes_left - buf_size) % 0x1000) OSAL_MSLEEP(1); offset += buf_size; bytes_left -= buf_size; } p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; } enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); struct ecore_ptt *p_ptt; u32 resp, param; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, (cmd == ECORE_PHY_CORE_READ) ? DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ, addr, &resp, &param, &len, (u32 *)p_buf); if (rc != ECORE_SUCCESS) DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; } enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); struct ecore_ptt *p_ptt; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp)); ecore_ptt_release(p_hwfn, p_ptt); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); struct ecore_ptt *p_ptt; u32 resp, param; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr, &resp, &param); p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; } enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, u32 addr) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); struct ecore_ptt *p_ptt; u32 resp, param; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, &resp, &param); p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; } /* rc receives ECORE_INVAL as default parameter because * it might not enter the while loop if the len is 0 */ enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp = 0, param; struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); enum _ecore_status_t rc = ECORE_INVAL; struct ecore_ptt
*p_ptt; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; switch (cmd) { case ECORE_PUT_FILE_DATA: nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; break; case ECORE_NVM_WRITE_NVRAM: nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; break; case ECORE_EXT_PHY_FW_UPGRADE: nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE; break; case ECORE_ENCRYPT_PASSWORD: nvm_cmd = DRV_MSG_CODE_ENCRYPT_PASSWORD; break; default: DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n", cmd); rc = ECORE_INVAL; goto out; } buf_idx = 0; while (buf_idx < len) { buf_size = OSAL_MIN_T(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) + buf_idx; rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, &resp, &param, buf_size, (u32 *)&p_buf[buf_idx]); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_dev, false, "ecore_mcp_nvm_write() failed, rc = %d\n", rc); resp = FW_MSG_CODE_ERROR; break; } if (resp != FW_MSG_CODE_OK && resp != FW_MSG_CODE_NVM_OK && resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { DP_NOTICE(p_dev, false, "nvm write failed, resp = 0x%08x\n", resp); rc = ECORE_UNKNOWN_ERROR; break; } /* This can be a lengthy process, and it's possible scheduler * isn't preemptable. Sleep a bit to prevent CPU hogging. */ if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000) OSAL_MSLEEP(1); buf_idx += buf_size; } p_dev->mcp_nvm_resp = resp; out: ecore_ptt_release(p_hwfn, p_ptt); return rc; } enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); struct ecore_ptt *p_ptt; u32 resp, param, nvm_cmd; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE; rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr, &resp, &param, len, (u32 *)p_buf); if (rc != ECORE_SUCCESS) DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; } enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, u32 addr) { struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); struct ecore_ptt *p_ptt; u32 resp, param; enum _ecore_status_t rc; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) return ECORE_BUSY; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr, &resp, &param); p_dev->mcp_nvm_resp = resp; ecore_ptt_release(p_hwfn, p_ptt); return rc; } enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) { u32 bytes_left, bytes_to_copy, buf_size, nvm_offset; u32 resp, param; enum _ecore_status_t rc; nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); addr = offset; offset = 0; bytes_left = len; while (bytes_left > 0) { bytes_to_copy = OSAL_MIN_T(u32, bytes_left, MAX_I2C_TRANSACTION_SIZE); nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); nvm_offset |= ((addr + offset) << DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); nvm_offset |= (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_TRANSCEIVER_READ, nvm_offset, &resp, &param, &buf_size, (u32 *)(p_buf + offset)); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "Failed to send a transceiver read command to the MFW.
rc = %d.\n", rc); return rc; } if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) return ECORE_NODEV; else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) return ECORE_UNKNOWN_ERROR; offset += buf_size; bytes_left -= buf_size; } return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) { u32 buf_idx, buf_size, nvm_offset, resp, param; enum _ecore_status_t rc; nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); buf_idx = 0; while (buf_idx < len) { buf_size = OSAL_MIN_T(u32, (len - buf_idx), MAX_I2C_TRANSACTION_SIZE); nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); nvm_offset |= ((offset + buf_idx) << DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); nvm_offset |= (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_TRANSCEIVER_WRITE, nvm_offset, &resp, &param, buf_size, (u32 *)&p_buf[buf_idx]); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "Failed to send a transceiver write command to the MFW. rc = %d.\n", rc); return rc; } if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) return ECORE_NODEV; else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) return ECORE_UNKNOWN_ERROR; buf_idx += buf_size; } return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 gpio, u32 *gpio_val) { enum _ecore_status_t rc = ECORE_SUCCESS; u32 drv_mb_param = 0, rsp; drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, drv_mb_param, &rsp, gpio_val); if (rc != ECORE_SUCCESS) return rc; if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) return ECORE_UNKNOWN_ERROR; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 gpio, u16 gpio_val) { enum _ecore_status_t rc = ECORE_SUCCESS; u32 drv_mb_param = 0, param, rsp; drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) | (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, drv_mb_param, &rsp, &param); if (rc != ECORE_SUCCESS) return rc; if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) return ECORE_UNKNOWN_ERROR; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 gpio, u32 *gpio_direction, u32 *gpio_ctrl) { u32 drv_mb_param = 0, rsp, val = 0; enum _ecore_status_t rc = ECORE_SUCCESS; drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO, drv_mb_param, &rsp, &val); if (rc != ECORE_SUCCESS) return rc; *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >> DRV_MB_PARAM_GPIO_DIRECTION_OFFSET; *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >> DRV_MB_PARAM_GPIO_CTRL_OFFSET; if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) return ECORE_UNKNOWN_ERROR; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; enum _ecore_status_t rc = ECORE_SUCCESS; drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, &param); if (rc != ECORE_SUCCESS) return rc; if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || (param !=
DRV_MB_PARAM_BIST_RC_PASSED)) rc = ECORE_UNKNOWN_ERROR; return rc; } enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 drv_mb_param, rsp, param; enum _ecore_status_t rc = ECORE_SUCCESS; drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, &param); if (rc != ECORE_SUCCESS) return rc; if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || (param != DRV_MB_PARAM_BIST_RC_PASSED)) rc = ECORE_UNKNOWN_ERROR; return rc; } enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images( struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images) { u32 drv_mb_param = 0, rsp; enum _ecore_status_t rc = ECORE_SUCCESS; drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, num_images); if (rc != ECORE_SUCCESS) return rc; if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) rc = ECORE_UNKNOWN_ERROR; return rc; } enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att( struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct bist_nvm_image_att *p_image_att, u32 image_index) { u32 buf_size, nvm_offset, resp, param; enum _ecore_status_t rc; nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); nvm_offset |= (image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET); rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, nvm_offset, &resp, &param, &buf_size, (u32 *)p_image_att); if (rc != ECORE_SUCCESS) return rc; if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || (p_image_att->return_code != 1)) rc = ECORE_UNKNOWN_ERROR; return rc; } enum _ecore_status_t ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_nvm_images image_id, struct ecore_nvm_image_att *p_image_att) { struct bist_nvm_image_att mfw_image_att; enum nvm_image_type type; u32 num_images, i; enum _ecore_status_t rc; /* Translate image_id into MFW definitions */ switch (image_id) { case ECORE_NVM_IMAGE_ISCSI_CFG: type = NVM_TYPE_ISCSI_CFG; break; case ECORE_NVM_IMAGE_FCOE_CFG: type = NVM_TYPE_FCOE_CFG; break; case ECORE_NVM_IMAGE_MDUMP: type = NVM_TYPE_MDUMP; break; default: DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n", image_id); return ECORE_INVAL; } /* Learn number of images, then traverse and see if one fits */ rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images); if (rc != ECORE_SUCCESS || !num_images) return ECORE_INVAL; for (i = 0; i < num_images; i++) { rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt, &mfw_image_att, i); if (rc != ECORE_SUCCESS) return rc; if (type == mfw_image_att.image_type) break; } if (i == num_images) { DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, "Failed to find nvram image of type %08x\n", image_id); return ECORE_INVAL; } p_image_att->start_addr = mfw_image_att.nvm_start_addr; p_image_att->length = mfw_image_att.len; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_nvm_images image_id, u8 *p_buffer, u32 buffer_len) { struct ecore_nvm_image_att image_att; enum _ecore_status_t rc; OSAL_MEM_ZERO(p_buffer, buffer_len); rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att); if (rc != ECORE_SUCCESS) return rc; /* Validate sizes - both the image's and the supplied buffer's */ if (image_att.length <= 4) {
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, "Image [%d] is too small - only %d bytes\n", image_id, image_att.length); return ECORE_INVAL; } /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */ image_att.length -= 4; if (image_att.length > buffer_len) { DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, "Image [%d] is too big - %08x bytes where only %08x are available\n", image_id, image_att.length, buffer_len); return ECORE_NOMEM; } return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr, p_buffer, image_att.length); } enum _ecore_status_t ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_temperature_info *p_temp_info) { struct ecore_temperature_sensor *p_temp_sensor; struct temperature_status_stc mfw_temp_info; struct ecore_mcp_mb_params mb_params; u32 val; enum _ecore_status_t rc; u8 i; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE; mb_params.p_data_dst = &mfw_temp_info; mb_params.data_dst_size = sizeof(mfw_temp_info); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS); p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors, ECORE_MAX_NUM_OF_SENSORS); for (i = 0; i < p_temp_info->num_sensors; i++) { val = mfw_temp_info.sensor[i]; p_temp_sensor = &p_temp_info->sensors[i]; p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >> SENSOR_LOCATION_OFFSET; p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >> THRESHOLD_HIGH_OFFSET; p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >> CRITICAL_TEMPERATURE_OFFSET; p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >> CURRENT_TEMP_OFFSET; } return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_get_mba_versions( struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mba_vers *p_mba_vers) { u32 buf_size, resp, param; enum _ecore_status_t rc; rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION, 0, &resp, &param, &buf_size, &(p_mba_vers->mba_vers[0])); if (rc != ECORE_SUCCESS) return rc; if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) rc = ECORE_UNKNOWN_ERROR; if (buf_size != MCP_DRV_NVM_BUF_LEN) rc = ECORE_UNKNOWN_ERROR; return rc; } enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u64 *num_events) { struct ecore_mcp_mb_params mb_params; OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params)); mb_params.cmd = DRV_MSG_CODE_MEM_ECC_EVENTS; mb_params.p_data_dst = (union drv_union_data *)num_events; return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } static enum resource_id_enum ecore_mcp_get_mfw_res_id(enum ecore_resources res_id) { enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; switch (res_id) { case ECORE_SB: mfw_res_id = RESOURCE_NUM_SB_E; break; case ECORE_L2_QUEUE: mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; break; case ECORE_VPORT: mfw_res_id = RESOURCE_NUM_VPORT_E; break; case ECORE_RSS_ENG: mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; break; case ECORE_PQ: mfw_res_id = RESOURCE_NUM_PQ_E; break; case ECORE_RL: mfw_res_id = RESOURCE_NUM_RL_E; break; case ECORE_MAC: case ECORE_VLAN: /* Each VFC resource can accommodate both a MAC and a VLAN */ mfw_res_id = RESOURCE_VFC_FILTER_E; break; case ECORE_ILT: mfw_res_id = RESOURCE_ILT_E; break; case ECORE_LL2_QUEUE: mfw_res_id = RESOURCE_LL2_QUEUE_E; break; case ECORE_RDMA_CNQ_RAM: case ECORE_CMDQS_CQS: /* CNQ/CMDQS are the same resource */ mfw_res_id =
RESOURCE_CQS_E; break; case ECORE_RDMA_STATS_QUEUE: mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; break; case ECORE_BDQ: mfw_res_id = RESOURCE_BDQ_E; break; default: break; } return mfw_res_id; } #define ECORE_RESC_ALLOC_VERSION_MAJOR 2 #define ECORE_RESC_ALLOC_VERSION_MINOR 0 #define ECORE_RESC_ALLOC_VERSION \ ((ECORE_RESC_ALLOC_VERSION_MAJOR << \ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \ (ECORE_RESC_ALLOC_VERSION_MINOR << \ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET)) struct ecore_resc_alloc_in_params { u32 cmd; enum ecore_resources res_id; u32 resc_max_val; }; struct ecore_resc_alloc_out_params { u32 mcp_resp; u32 mcp_param; u32 resc_num; u32 resc_start; u32 vf_resc_num; u32 vf_resc_start; u32 flags; }; static enum _ecore_status_t ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_resc_alloc_in_params *p_in_params, struct ecore_resc_alloc_out_params *p_out_params) { struct ecore_mcp_mb_params mb_params; struct resource_info mfw_resc_info; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info)); mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id); if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { DP_ERR(p_hwfn, "Failed to match resource %d [%s] with the MFW resources\n", p_in_params->res_id, ecore_hw_get_resc_name(p_in_params->res_id)); return ECORE_INVAL; } switch (p_in_params->cmd) { case DRV_MSG_SET_RESOURCE_VALUE_MSG: mfw_resc_info.size = p_in_params->resc_max_val; /* Fallthrough */ case DRV_MSG_GET_RESOURCE_ALLOC_MSG: break; default: DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", p_in_params->cmd); return ECORE_INVAL; } OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = p_in_params->cmd; mb_params.param = ECORE_RESC_ALLOC_VERSION; mb_params.p_data_src = &mfw_resc_info; mb_params.data_src_size = sizeof(mfw_resc_info); mb_params.p_data_dst = mb_params.p_data_src; mb_params.data_dst_size = mb_params.data_src_size; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", p_in_params->cmd, p_in_params->res_id, ecore_hw_get_resc_name(p_in_params->res_id), GET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), GET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), p_in_params->resc_max_val); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; p_out_params->mcp_resp = mb_params.mcp_resp; p_out_params->mcp_param = mb_params.mcp_param; p_out_params->resc_num = mfw_resc_info.size; p_out_params->resc_start = mfw_resc_info.offset; p_out_params->vf_resc_num = mfw_resc_info.vf_size; p_out_params->vf_resc_start = mfw_resc_info.vf_offset; p_out_params->flags = mfw_resc_info.flags; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", GET_MFW_FIELD(p_out_params->mcp_param, FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), GET_MFW_FIELD(p_out_params->mcp_param, FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), p_out_params->resc_num, p_out_params->resc_start, p_out_params->vf_resc_num, p_out_params->vf_resc_start, p_out_params->flags); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_resources res_id, u32 resc_max_val, u32 *p_mcp_resp) { struct ecore_resc_alloc_out_params out_params; struct ecore_resc_alloc_in_params in_params; enum _ecore_status_t rc; 
OSAL_MEM_ZERO(&in_params, sizeof(in_params)); in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; in_params.res_id = res_id; in_params.resc_max_val = resc_max_val; OSAL_MEM_ZERO(&out_params, sizeof(out_params)); rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, &out_params); if (rc != ECORE_SUCCESS) return rc; *p_mcp_resp = out_params.mcp_resp; return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_resources res_id, u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start) { struct ecore_resc_alloc_out_params out_params; struct ecore_resc_alloc_in_params in_params; enum _ecore_status_t rc; OSAL_MEM_ZERO(&in_params, sizeof(in_params)); in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; in_params.res_id = res_id; OSAL_MEM_ZERO(&out_params, sizeof(out_params)); rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, &out_params); if (rc != ECORE_SUCCESS) return rc; *p_mcp_resp = out_params.mcp_resp; if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { *p_resc_num = out_params.resc_num; *p_resc_start = out_params.resc_start; } return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 mcp_resp, mcp_param; return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, &mcp_resp, &mcp_param); } enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 lldp_mac_addr[ETH_ALEN]) { struct ecore_mcp_mb_params mb_params; struct mcp_mac lldp_mac; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC; mb_params.p_data_dst = &lldp_mac; mb_params.data_dst_size = sizeof(lldp_mac); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; if (mb_params.mcp_resp != FW_MSG_CODE_OK) { DP_NOTICE(p_hwfn, false, "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n", mb_params.mcp_resp); return ECORE_INVAL; } *(u16 *)lldp_mac_addr = OSAL_BE16_TO_CPU(*(u16 *)&lldp_mac.mac_upper); *(u32 *)(lldp_mac_addr + 2) = OSAL_BE32_TO_CPU(lldp_mac.mac_lower); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n", lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2], lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 lldp_mac_addr[ETH_ALEN]) { struct ecore_mcp_mb_params mb_params; struct mcp_mac lldp_mac; enum _ecore_status_t rc; DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n", lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2], lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]); OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac)); lldp_mac.mac_upper = OSAL_CPU_TO_BE16(*(u16 *)lldp_mac_addr); lldp_mac.mac_lower = OSAL_CPU_TO_BE32(*(u32 *)(lldp_mac_addr + 2)); OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC; mb_params.p_data_src = &lldp_mac; mb_params.data_src_size = sizeof(lldp_mac); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; if (mb_params.mcp_resp != FW_MSG_CODE_OK) { DP_NOTICE(p_hwfn, false, "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n", mb_params.mcp_resp); return ECORE_INVAL; } return ECORE_SUCCESS; } static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn, struct 
ecore_ptt *p_ptt, u32 param, u32 *p_mcp_resp, u32 *p_mcp_param) { enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, p_mcp_resp, p_mcp_param); if (rc != ECORE_SUCCESS) return rc; if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The resource command is unsupported by the MFW\n"); return ECORE_NOTIMPL; } if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE); DP_NOTICE(p_hwfn, false, "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", param, opcode); return ECORE_INVAL; } return rc; } static enum _ecore_status_t __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_resc_lock_params *p_params) { u32 param = 0, mcp_resp, mcp_param; u8 opcode, timeout; enum _ecore_status_t rc; switch (p_params->timeout) { case ECORE_MCP_RESC_LOCK_TO_DEFAULT: opcode = RESOURCE_OPCODE_REQ; timeout = 0; break; case ECORE_MCP_RESC_LOCK_TO_NONE: opcode = RESOURCE_OPCODE_REQ_WO_AGING; timeout = 0; break; default: opcode = RESOURCE_OPCODE_REQ_W_AGING; timeout = p_params->timeout; break; } SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, timeout); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", param, timeout, opcode, p_params->resource); /* Attempt to acquire the resource */ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); if (rc != ECORE_SUCCESS) return rc; /* Analyze the response */ p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", mcp_param, opcode, p_params->owner); switch (opcode) { case RESOURCE_OPCODE_GNT: p_params->b_granted = true; break; case RESOURCE_OPCODE_BUSY: p_params->b_granted = false; break; default: DP_NOTICE(p_hwfn, false, "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", mcp_param, opcode); return ECORE_INVAL; } return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_resc_lock_params *p_params) { u32 retry_cnt = 0; enum _ecore_status_t rc; do { /* No need for an interval before the first iteration */ if (retry_cnt) { if (p_params->sleep_b4_retry) { u32 retry_interval_in_ms = DIV_ROUND_UP(p_params->retry_interval, 1000); OSAL_MSLEEP(retry_interval_in_ms); } else { OSAL_UDELAY(p_params->retry_interval); } } rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params); if (rc != ECORE_SUCCESS) return rc; if (p_params->b_granted) break; } while (retry_cnt++ < p_params->retry_num); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_resc_unlock_params *p_params) { u32 param = 0, mcp_resp, mcp_param; u8 opcode; enum _ecore_status_t rc; opcode = p_params->b_force ? 
RESOURCE_OPCODE_FORCE_RELEASE : RESOURCE_OPCODE_RELEASE; SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", param, opcode, p_params->resource); /* Attempt to release the resource */ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); if (rc != ECORE_SUCCESS) return rc; /* Analyze the response */ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", mcp_param, opcode); switch (opcode) { case RESOURCE_OPCODE_RELEASED_PREVIOUS: DP_INFO(p_hwfn, "Resource unlock request for an already released resource [%d]\n", p_params->resource); /* Fallthrough */ case RESOURCE_OPCODE_RELEASED: p_params->b_released = true; break; case RESOURCE_OPCODE_WRONG_OWNER: p_params->b_released = false; break; default: DP_NOTICE(p_hwfn, false, "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", mcp_param, opcode); return ECORE_INVAL; } return ECORE_SUCCESS; } void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock, struct ecore_resc_unlock_params *p_unlock, enum ecore_resc_lock resource, bool b_is_permanent) { if (p_lock != OSAL_NULL) { OSAL_MEM_ZERO(p_lock, sizeof(*p_lock)); /* Permanent resources don't require aging, and there's no * point in trying to acquire them more than once since it's * unexpected another entity would release them. */ if (b_is_permanent) { p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE; } else { p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT; p_lock->retry_interval = ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT; p_lock->sleep_b4_retry = true; } p_lock->resource = resource; } if (p_unlock != OSAL_NULL) { OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock)); p_unlock->resource = resource; } } enum _ecore_status_t ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 vlan) { u32 resp = 0, param = 0; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID, (u32)vlan << DRV_MB_PARAM_FCOE_CVID_OFFSET, &resp, &param); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "Failed to update fcoe vlan, rc = %d\n", rc); return rc; } enum _ecore_status_t ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 *wwn) { struct ecore_mcp_mb_params mb_params; struct mcp_wwn fabric_name; enum _ecore_status_t rc; OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name)); fabric_name.wwn_upper = *(u32 *)wwn; fabric_name.wwn_lower = *(u32 *)(wwn + 4); OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME; mb_params.p_data_src = &fabric_name; mb_params.data_src_size = sizeof(fabric_name); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) DP_ERR(p_hwfn, "Failed to update fcoe wwn, rc = %d\n", rc); return rc; } void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 offset, u32 val) { struct ecore_mcp_mb_params mb_params = {0}; enum _ecore_status_t rc = ECORE_SUCCESS; u32 dword = val; mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG; mb_params.param = offset; mb_params.p_data_src = &dword; mb_params.data_src_size = sizeof(dword); rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, false, "Failed to wol write request, rc = %d\n", rc); } if (mb_params.mcp_resp !=
FW_MSG_CODE_WOL_READ_WRITE_OK) { DP_NOTICE(p_hwfn, false, "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n", val, offset, mb_params.mcp_resp); rc = ECORE_UNKNOWN_ERROR; } } bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn) { return !!(p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); } bool ecore_mcp_rlx_odr_supported(struct ecore_hwfn *p_hwfn) { return !!(p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD); } enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 mcp_resp; enum _ecore_status_t rc; rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); if (rc == ECORE_SUCCESS) DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE), "MFW supported features: %08x\n", p_hwfn->mcp_info->capabilities); return rc; } enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { u32 mcp_resp, mcp_param, features; features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ | DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); } enum _ecore_status_t ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_drv_attr *p_drv_attr) { struct attribute_cmd_write_stc attr_cmd_write; enum _attribute_commands_e mfw_attr_cmd; struct ecore_mcp_mb_params mb_params; enum _ecore_status_t rc; switch (p_drv_attr->attr_cmd) { case ECORE_MCP_DRV_ATTR_CMD_READ: mfw_attr_cmd = ATTRIBUTE_CMD_READ; break; case ECORE_MCP_DRV_ATTR_CMD_WRITE: mfw_attr_cmd = ATTRIBUTE_CMD_WRITE; break; case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR: mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR; break; case ECORE_MCP_DRV_ATTR_CMD_CLEAR: mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR; break; default: DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n", p_drv_attr->attr_cmd); return ECORE_INVAL; } OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE; SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY, p_drv_attr->attr_num); SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD, mfw_attr_cmd); if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) { OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write)); attr_cmd_write.val = p_drv_attr->val; attr_cmd_write.mask = p_drv_attr->mask; attr_cmd_write.offset = p_drv_attr->offset; mb_params.p_data_src = &attr_cmd_write; mb_params.data_src_size = sizeof(attr_cmd_write); } rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The attribute command is not supported by the MFW\n"); return ECORE_NOTIMPL; } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) { DP_INFO(p_hwfn, "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n", mb_params.mcp_resp, p_drv_attr->attr_cmd, p_drv_attr->attr_num); return ECORE_INVAL; } DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n", p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num, p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset, mb_params.mcp_param); if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ || p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR) p_drv_attr->val = mb_params.mcp_param; return ECORE_SUCCESS; } enum 
_ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_dev *p_dev = p_hwfn->p_dev; struct ecore_mcp_mb_params mb_params; u8 fir_valid, l2_valid; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG; rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The get_engine_config command is unsupported by the MFW\n"); return ECORE_NOTIMPL; } fir_valid = GET_MFW_FIELD(mb_params.mcp_param, FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID); if (fir_valid) p_dev->fir_affin = GET_MFW_FIELD(mb_params.mcp_param, FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE); l2_valid = GET_MFW_FIELD(mb_params.mcp_param, FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID); if (l2_valid) p_dev->l2_affin_hint = GET_MFW_FIELD(mb_params.mcp_param, FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE); DP_INFO(p_hwfn, "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n", fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_dev *p_dev = p_hwfn->p_dev; struct ecore_mcp_mb_params mb_params; enum _ecore_status_t rc; OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP; rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc != ECORE_SUCCESS) return rc; if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The get_ppfid_bitmap command is unsupported by the MFW\n"); return ECORE_NOTIMPL; } p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param, FW_MB_PARAM_PPFID_BITMAP); DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n", p_dev->ppfid_bitmap); return ECORE_SUCCESS; } enum _ecore_status_t ecore_mcp_ind_table_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 retry_num, u32 retry_interval) { struct ecore_resc_lock_params resc_lock_params; enum _ecore_status_t rc; OSAL_MEM_ZERO(&resc_lock_params, sizeof(struct ecore_resc_lock_params)); resc_lock_params.resource = ECORE_RESC_LOCK_IND_TABLE; if (!retry_num) retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT; resc_lock_params.retry_num = retry_num; if (!retry_interval) retry_interval = ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT; resc_lock_params.retry_interval = retry_interval; rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { DP_NOTICE(p_hwfn, false, "Failed to acquire the resource lock for IDT access\n"); return ECORE_BUSY; } return rc; } enum _ecore_status_t ecore_mcp_ind_table_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { struct ecore_resc_unlock_params resc_unlock_params; enum _ecore_status_t rc; OSAL_MEM_ZERO(&resc_unlock_params, sizeof(struct ecore_resc_unlock_params)); resc_unlock_params.resource = ECORE_RESC_LOCK_IND_TABLE; rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); return rc; } #ifdef _NTDDK_ #pragma warning(pop) #endif diff --git a/sys/dev/qlnx/qlnxe/ecore_mcp.h b/sys/dev/qlnx/qlnxe/ecore_mcp.h index c94583cdfba3..edb1f9083467 100644 --- a/sys/dev/qlnx/qlnxe/ecore_mcp.h +++ b/sys/dev/qlnx/qlnxe/ecore_mcp.h @@ -1,684 +1,684 @@ /* * Copyright (c) 2017-2018 Cavium, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #ifndef __ECORE_MCP_H__ #define __ECORE_MCP_H__ #include "bcm_osal.h" #include "mcp_public.h" #include "ecore.h" #include "ecore_mcp_api.h" #include "ecore_dev_api.h" /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields * required during hw-init will be placed in their correct place in shmem * we need it in ecore_dev.c [for readin the nvram reflection in shmem]. */ #define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \ ((rel_pfid) | \ ((p_hwfn)->abs_pf_id & 1) << 3) : \ rel_pfid) #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) struct ecore_mcp_info { /* List for mailbox commands which were sent and wait for a response */ osal_list_t cmd_list; - /* Spinlock used for protecting the access to the mailbox commands list + /* Lock used for protecting the access to the mailbox commands list * and the sending of the commands. */ - osal_spinlock_t cmd_lock; + osal_mutex_t cmd_lock; /* Flag to indicate whether sending a MFW mailbox command is blocked */ bool b_block_cmd; /* Spinlock used for syncing SW link-changes and link-changes * originating from attention context. 
*/ - osal_spinlock_t link_lock; + osal_mutex_t link_lock; /* Address of the MCP public area */ u32 public_base; /* Address of the driver mailbox */ u32 drv_mb_addr; /* Address of the MFW mailbox */ u32 mfw_mb_addr; /* Address of the port configuration (link) */ u32 port_addr; /* Current driver mailbox sequence */ u16 drv_mb_seq; /* Current driver pulse sequence */ u16 drv_pulse_seq; struct ecore_mcp_link_params link_input; struct ecore_mcp_link_state link_output; struct ecore_mcp_link_capabilities link_capabilities; struct ecore_mcp_function_info func_info; u8 *mfw_mb_cur; u8 *mfw_mb_shadow; u16 mfw_mb_length; u32 mcp_hist; /* Capabilties negotiated with the MFW */ u32 capabilities; }; struct ecore_mcp_mb_params { u32 cmd; u32 param; void *p_data_src; void *p_data_dst; u32 mcp_resp; u32 mcp_param; u8 data_src_size; u8 data_dst_size; u32 flags; #define ECORE_MB_FLAG_CAN_SLEEP (0x1 << 0) #define ECORE_MB_FLAG_AVOID_BLOCK (0x1 << 1) #define ECORE_MB_FLAGS_IS_SET(params, flag) \ ((params) != OSAL_NULL && ((params)->flags & ECORE_MB_FLAG_##flag)) }; enum ecore_ov_eswitch { ECORE_OV_ESWITCH_NONE, ECORE_OV_ESWITCH_VEB, ECORE_OV_ESWITCH_VEPA }; struct ecore_drv_tlv_hdr { u8 tlv_type; /* According to the enum below */ u8 tlv_length; /* In dwords - not including this header */ u8 tlv_reserved; #define ECORE_DRV_TLV_FLAGS_CHANGED 0x01 u8 tlv_flags; }; /** * @brief Initialize the interface with the MCP * * @param p_hwfn - HW func * @param p_ptt - PTT required for register access * * @return enum _ecore_status_t */ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Initialize the port interface with the MCP * * @param p_hwfn * @param p_ptt * Can only be called after `num_ports_in_engine' is set */ void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Releases resources allocated during the init process. * * @param p_hwfn - HW func * @param p_ptt - PTT required for register access * * @return enum _ecore_status_t */ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn); /** * @brief This function is called from the DPC context. After * pointing PTT to the mfw mb, check for events sent by the MCP * to the driver and ack them. In case a critical event * detected, it will be handled here, otherwise the work will be * queued to a sleepable work-queue. * * @param p_hwfn - HW function * @param p_ptt - PTT required for register access * @return enum _ecore_status_t - ECORE_SUCCESS - operation * was successul. */ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief When MFW doesn't get driver pulse for couple of seconds, at some * threshold before timeout expires, it will generate interrupt * through a dedicated status block (DPSB - Driver Pulse Status * Block), which the driver should respond immediately, by * providing keepalive indication after setting the PTT to the * driver-MFW mailbox. This function is called directly from the * DPC upon receiving the DPSB attention. * * @param p_hwfn - hw function * @param p_ptt - PTT required for register access * @return enum _ecore_status_t - ECORE_SUCCESS - operation * was successful. 
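For context on the change from osal_spinlock_t to osal_mutex_t above: mailbox commands carrying ECORE_MB_FLAG_CAN_SLEEP may block while waiting on the MFW, and on FreeBSD a thread must not sleep while holding a non-sleepable lock, which is presumably why the locks guarding the command list and link state are declared with the mutex type here. The fragment below is a stand-alone sketch (hypothetical demo_* names, stock mtx(9)/sx(9) APIs only, not driver code) contrasting the two lock classes.

/*
 * Sketch: why a sleepable lock is needed around code that may sleep.
 * Names are hypothetical; only standard FreeBSD kernel APIs are used.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

static struct mtx demo_mtx;	/* non-sleepable; cannot be held across pause() */
static struct sx  demo_sx;	/* sleepable; may be held while the thread waits */

static void
demo_lock_classes(void)
{
	mtx_init(&demo_mtx, "demo_mtx", NULL, MTX_DEF);
	sx_init(&demo_sx, "demo_sx");

	sx_xlock(&demo_sx);
	pause("demowt", hz / 10);	/* waiting here is legal under an sx lock */
	sx_xunlock(&demo_sx);

	mtx_lock(&demo_mtx);
	/* A pause()/msleep() here would be a "sleeping with a non-sleepable
	 * lock held" violation; moving cmd_lock/link_lock to a sleepable
	 * lock type avoids exactly that when a command needs to sleep. */
	mtx_unlock(&demo_mtx);

	sx_destroy(&demo_sx);
	mtx_destroy(&demo_mtx);
}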
*/ enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); enum ecore_drv_role { ECORE_DRV_ROLE_OS, ECORE_DRV_ROLE_KDUMP, }; struct ecore_load_req_params { /* Input params */ enum ecore_drv_role drv_role; u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */ bool avoid_eng_reset; enum ecore_override_force_load override_force_load; /* Output params */ u32 load_code; }; /** * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds, * returns whether this PF is the first on the engine/port or function. * * @param p_hwfn * @param p_ptt * @param p_params * * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. */ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_load_req_params *p_params); /** * @brief Sends a LOAD_DONE message to the MFW * * @param p_hwfn * @param p_ptt * * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. */ enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Sends a CANCEL_LOAD_REQ message to the MFW * * @param p_hwfn * @param p_ptt * * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. */ enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Sends a UNLOAD_REQ message to the MFW * * @param p_hwfn * @param p_ptt * * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. */ enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Sends a UNLOAD_DONE message to the MFW * * @param p_hwfn * @param p_ptt * * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. */ enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Read the MFW mailbox into Current buffer. * * @param p_hwfn * @param p_ptt */ void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Ack to mfw that driver finished FLR process for VFs * * @param p_hwfn * @param p_ptt * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. * * @param return enum _ecore_status_t - ECORE_SUCCESS upon success. */ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *vfs_to_ack); /** * @brief - calls during init to read shmem of all function-related info. * * @param p_hwfn * * @param return ECORE_SUCCESS upon success. */ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief - Reset the MCP using mailbox command. * * @param p_hwfn * @param p_ptt * * @param return ECORE_SUCCESS upon success. */ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief indicates whether the MFW objects [under mcp_info] are accessible * * @param p_hwfn * * @return true iff MFW is running and mcp_info is initialized */ bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn); /** * @brief request MFW to configure MSI-X for a VF * * @param p_hwfn * @param p_ptt * @param vf_id - absolute inside engine * @param num_sbs - number of entries to request * * @return enum _ecore_status_t */ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 vf_id, u8 num); /** * @brief - Halt the MCP. * * @param p_hwfn * @param p_ptt * * @param return ECORE_SUCCESS upon success. 
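As a usage illustration for the ecore_load_req_params structure and the LOAD_REQ/LOAD_DONE prototypes declared a little earlier in this header, here is a minimal sketch of the handshake; it zero-initializes the structure, sets only the fields documented above, and collapses the hardware-init work the real driver performs between the two calls into a comment.

/* Sketch only: drive LOAD_REQ and LOAD_DONE using the API in this header.
 * Real callers branch on params.load_code (first on engine/port/function)
 * and complete hardware init before acknowledging with LOAD_DONE. */
static enum _ecore_status_t
demo_load_sequence(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_load_req_params params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.drv_role = ECORE_DRV_ROLE_OS;
	params.timeout_val = 0;		/* '0' - default timeout */
	params.avoid_eng_reset = false;

	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		return (rc);

	/* ... hardware initialization would happen here ... */

	return (ecore_mcp_load_done(p_hwfn, p_ptt));
}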
*/ enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief - Wake up the MCP. * * @param p_hwfn * @param p_ptt * * @param return ECORE_SUCCESS upon success. */ enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_link_state *p_link, u8 max_bw); int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_link_state *p_link, u8 min_bw); enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 mask_parities); #if 0 enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 *p_pf); #endif /** * @brief - Sends crash mdump related info to the MFW. * * @param p_hwfn * @param p_ptt * @param epoch * * @param return ECORE_SUCCESS upon success. */ enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 epoch); /** * @brief - Triggers a MFW crash dump procedure. * * @param p_hwfn * @param p_ptt * * @param return ECORE_SUCCESS upon success. */ enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); struct ecore_mdump_retain_data { u32 valid; u32 epoch; u32 pf; u32 status; }; /** * @brief - Gets the mdump retained data from the MFW. * * @param p_hwfn * @param p_ptt * @param p_mdump_retain * * @param return ECORE_SUCCESS upon success. */ enum _ecore_status_t ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mdump_retain_data *p_mdump_retain); /** * @brief - Sets the MFW's max value for the given resource * * @param p_hwfn * @param p_ptt * @param res_id * @param resc_max_val * @param p_mcp_resp * * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. */ enum _ecore_status_t ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_resources res_id, u32 resc_max_val, u32 *p_mcp_resp); /** * @brief - Gets the MFW allocation info for the given resource * * @param p_hwfn * @param p_ptt * @param res_id * @param p_mcp_resp * @param p_resc_num * @param p_resc_start * * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. */ enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_resources res_id, u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start); /** * @brief - Initiates PF FLR * * @param p_hwfn * @param p_ptt * * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. */ enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Send eswitch mode to MFW * * @param p_hwfn * @param p_ptt * @param eswitch - eswitch mode * * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. 
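The mdump helpers declared just above lend themselves to a short example: the sketch below queries the data the MFW retained from a previous crash and logs it when the record is marked valid. The helper name is hypothetical; the fields come straight from struct ecore_mdump_retain_data, and the DP_NOTICE form matches its use in ecore_mcp.c.

/* Sketch: report mdump data retained by the MFW from a previous crash. */
static void
demo_report_mdump_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_retain_data retain;

	if (ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &retain) != ECORE_SUCCESS)
		return;

	if (retain.valid)
		DP_NOTICE(p_hwfn, false,
			  "mdump retained: epoch %u, pf %u, status 0x%08x\n",
			  retain.epoch, retain.pf, retain.status);
}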
*/ enum _ecore_status_t ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, enum ecore_ov_eswitch eswitch); #define ECORE_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP /* 0 */ #define ECORE_MCP_RESC_LOCK_MAX_VAL 31 enum ecore_resc_lock { ECORE_RESC_LOCK_DBG_DUMP = ECORE_MCP_RESC_LOCK_MIN_VAL, /* Locks that the MFW is aware of should be added here downwards */ /* Ecore only locks should be added here upwards */ ECORE_RESC_LOCK_IND_TABLE = 26, ECORE_RESC_LOCK_PTP_PORT0 = 27, ECORE_RESC_LOCK_PTP_PORT1 = 28, ECORE_RESC_LOCK_PTP_PORT2 = 29, ECORE_RESC_LOCK_PTP_PORT3 = 30, ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL, /* A dummy value to be used for auxillary functions in need of * returning an 'error' value. */ ECORE_RESC_LOCK_RESC_INVALID, }; struct ecore_resc_lock_params { /* Resource number [valid values are 0..31] */ u8 resource; /* Lock timeout value in seconds [default, none or 1..254] */ u8 timeout; #define ECORE_MCP_RESC_LOCK_TO_DEFAULT 0 #define ECORE_MCP_RESC_LOCK_TO_NONE 255 /* Number of times to retry locking */ u8 retry_num; #define ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT 10 /* The interval in usec between retries */ u32 retry_interval; #define ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000 /* Use sleep or delay between retries */ bool sleep_b4_retry; /* Will be set as true if the resource is free and granted */ bool b_granted; /* Will be filled with the resource owner. * [0..15 = PF0-15, 16 = MFW, 17 = diag over serial] */ u8 owner; }; /** * @brief Acquires MFW generic resource lock * * @param p_hwfn * @param p_ptt * @param p_params * * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. */ enum _ecore_status_t ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_resc_lock_params *p_params); struct ecore_resc_unlock_params { /* Resource number [valid values are 0..31] */ u8 resource; /* Allow to release a resource even if belongs to another PF */ bool b_force; /* Will be set as true if the resource is released */ bool b_released; }; /** * @brief Releases MFW generic resource lock * * @param p_hwfn * @param p_ptt * @param p_params * * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. */ enum _ecore_status_t ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_resc_unlock_params *p_params); /** * @brief - default initialization for lock/unlock resource structs * * @param p_lock - lock params struct to be initialized; Can be OSAL_NULL * @param p_unlock - unlock params struct to be initialized; Can be OSAL_NULL * @param resource - the requested resource * @paral b_is_permanent - disable retries & aging when set */ void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock, struct ecore_resc_unlock_params *p_unlock, enum ecore_resc_lock resource, bool b_is_permanent); void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 offset, u32 val); /** * @brief Learn of supported MFW features; To be done during early init * * @param p_hwfn * @param p_ptt */ enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Inform MFW of set of features supported by driver. Should be done * inside the contet of the LOAD_REQ. * * @param p_hwfn * @param p_ptt */ enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Initialize MFW mailbox and sequence values for driver interaction. 
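To tie the lock/unlock parameter structures above together, here is a hedged sketch of the acquire/release pattern built on ecore_mcp_resc_lock_default_init(); it mirrors the ecore_mcp_ind_table_lock()/_unlock() pair in ecore_mcp.c (default retry count and interval, check b_granted, then release), with the resource and the critical section left to the caller.

/* Sketch: take and release an MFW-arbitrated resource lock using the
 * default (non-permanent) initialization, i.e. retries and aging enabled. */
static enum _ecore_status_t
demo_with_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    enum ecore_resc_lock resource)
{
	struct ecore_resc_lock_params lock;
	struct ecore_resc_unlock_params unlock;
	enum _ecore_status_t rc;

	ecore_mcp_resc_lock_default_init(&lock, &unlock, resource,
					 false /* not permanent */);

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock);
	if (rc != ECORE_SUCCESS)
		return (rc);
	if (!lock.b_granted)
		return (ECORE_BUSY);	/* held by another PF or the MFW */

	/* ... access protected by the MFW-arbitrated lock ... */

	return (ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock));
}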
* * @param p_hwfn * @param p_ptt */ enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); enum ecore_mcp_drv_attr_cmd { ECORE_MCP_DRV_ATTR_CMD_READ, ECORE_MCP_DRV_ATTR_CMD_WRITE, ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR, ECORE_MCP_DRV_ATTR_CMD_CLEAR, }; struct ecore_mcp_drv_attr { enum ecore_mcp_drv_attr_cmd attr_cmd; u32 attr_num; /* R/RC - will be set with the read value * W - should hold the required value to be written * C - DC */ u32 val; /* W - mask/offset to be applied on the given value * R/RC/C - DC */ u32 mask; u32 offset; }; /** * @brief Handle the drivers' attributes that are kept by the MFW. * * @param p_hwfn * @param p_ptt * @param p_drv_attr */ enum _ecore_status_t ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_mcp_drv_attr *p_drv_attr); /** * @brief Read ufp config from the shared memory. * * @param p_hwfn * @param p_ptt */ void ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Get the engine affinity configuration. * * @param p_hwfn * @param p_ptt */ enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Get the PPFID bitmap. * * @param p_hwfn * @param p_ptt */ enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); /** * @brief Acquire MCP lock to access to HW indirection table entries * * @param p_hwfn * @param p_ptt * @param retry_num * @param retry_interval */ enum _ecore_status_t ecore_mcp_ind_table_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u8 retry_num, u32 retry_interval); /** * @brief Release MCP lock of access to HW indirection table entries * * @param p_hwfn * @param p_ptt */ enum _ecore_status_t ecore_mcp_ind_table_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); #endif /* __ECORE_MCP_H__ */ diff --git a/sys/dev/qlnx/qlnxe/qlnx_def.h b/sys/dev/qlnx/qlnxe/qlnx_def.h index 8ac403ab49dc..4342bba89587 100644 --- a/sys/dev/qlnx/qlnxe/qlnx_def.h +++ b/sys/dev/qlnx/qlnxe/qlnx_def.h @@ -1,756 +1,756 @@ /* * Copyright (c) 2017-2018 Cavium, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ /* * File: qlnx_def.h * Author : David C Somayajulu, Cavium Inc., San Jose, CA 95131. 
*/ #ifndef _QLNX_DEF_H_ #define _QLNX_DEF_H_ #define VER_SIZE 16 struct qlnx_ivec { uint32_t rss_idx; void *ha; struct resource *irq; void *handle; int irq_rid; }; typedef struct qlnx_ivec qlnx_ivec_t; //#define QLNX_MAX_RSS 30 #define QLNX_MAX_VF_RSS 4 #define QLNX_MAX_RSS 36 #define QLNX_DEFAULT_RSS 16 #define QLNX_MAX_TC 1 enum QLNX_STATE { QLNX_STATE_CLOSED, QLNX_STATE_OPEN, }; #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) #define MAX_NUM_TC 8 #define MAX_NUM_PRI 8 #ifndef BITS_PER_BYTE #define BITS_PER_BYTE 8 #endif /* #ifndef BITS_PER_BYTE */ /* * RX ring buffer contains pointer to kmalloc() data only, */ struct sw_rx_data { void *data; bus_dmamap_t map; dma_addr_t dma_addr; }; enum qlnx_agg_state { QLNX_AGG_STATE_NONE = 0, QLNX_AGG_STATE_START = 1, QLNX_AGG_STATE_ERROR = 2 }; struct qlnx_agg_info { /* rx_buf is a data buffer that can be placed /consumed from rx bd * chain. It has two purposes: We will preallocate the data buffer * for each aggregation when we open the interface and will place this * buffer on the rx-bd-ring when we receive TPA_START. We don't want * to be in a state where allocation fails, as we can't reuse the * consumer buffer in the rx-chain since FW may still be writing to it * (since header needs to be modified for TPA. * The second purpose is to keep a pointer to the bd buffer during * aggregation. */ struct sw_rx_data rx_buf; enum qlnx_agg_state agg_state; uint16_t placement_offset; struct mbuf *mpf; /* first mbuf in chain */ struct mbuf *mpl; /* last mbuf in chain */ }; #define RX_RING_SIZE_POW 13 #define RX_RING_SIZE (1 << RX_RING_SIZE_POW) #define TX_RING_SIZE_POW 14 #define TX_RING_SIZE (1 << TX_RING_SIZE_POW) struct qlnx_rx_queue { volatile __le16 *hw_cons_ptr; struct sw_rx_data sw_rx_ring[RX_RING_SIZE]; uint16_t sw_rx_cons; uint16_t sw_rx_prod; struct ecore_chain rx_bd_ring; struct ecore_chain rx_comp_ring; void __iomem *hw_rxq_prod_addr; void *handle; /* LRO */ struct qlnx_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; uint32_t rx_buf_size; uint16_t num_rx_buffers; uint16_t rxq_id; #ifdef QLNX_SOFT_LRO struct lro_ctrl lro; #endif }; union db_prod { struct eth_db_data data; uint32_t raw; }; struct sw_tx_bd { struct mbuf *mp; bus_dmamap_t map; uint8_t flags; int nsegs; /* Set on the first BD descriptor when there is a split BD */ #define QLNX_TSO_SPLIT_BD (1<<0) }; #define QLNX_MAX_SEGMENTS 255 struct qlnx_tx_queue { int index; /* Queue index */ volatile __le16 *hw_cons_ptr; struct sw_tx_bd sw_tx_ring[TX_RING_SIZE]; uint16_t sw_tx_cons; uint16_t sw_tx_prod; struct ecore_chain tx_pbl; void __iomem *doorbell_addr; void *handle; union db_prod tx_db; bus_dma_segment_t segs[QLNX_MAX_SEGMENTS]; uint16_t num_tx_buffers; }; #define BD_UNMAP_ADDR(bd) HILO_U64(le32toh((bd)->addr.hi), \ le32toh((bd)->addr.lo)) #define BD_UNMAP_LEN(bd) (le16toh((bd)->nbytes)) #define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \ do { \ (bd)->addr.hi = htole32(U64_HI(maddr)); \ (bd)->addr.lo = htole32(U64_LO(maddr)); \ (bd)->nbytes = htole16(len); \ } while (0); #define QLNX_FP_MAX_SEGS 24 struct qlnx_fastpath { void *edev; uint8_t rss_id; struct ecore_sb_info *sb_info; struct qlnx_rx_queue *rxq; struct qlnx_tx_queue *txq[MAX_NUM_TC]; char name[64]; struct mtx tx_mtx; char tx_mtx_name[32]; struct buf_ring *tx_br; uint32_t tx_ring_full; struct task fp_task; struct taskqueue *fp_taskqueue; /* transmit statistics */ uint64_t tx_pkts_processed; uint64_t tx_pkts_freed; uint64_t tx_pkts_transmitted; uint64_t tx_pkts_completed; uint64_t tx_tso_pkts; uint64_t tx_non_tso_pkts; #ifdef 
QLNX_TRACE_PERF_DATA uint64_t tx_pkts_trans_ctx; uint64_t tx_pkts_compl_ctx; uint64_t tx_pkts_trans_fp; uint64_t tx_pkts_compl_fp; uint64_t tx_pkts_compl_intr; #endif uint64_t tx_lso_wnd_min_len; uint64_t tx_defrag; uint64_t tx_nsegs_gt_elem_left; uint32_t tx_tso_max_nsegs; uint32_t tx_tso_min_nsegs; uint32_t tx_tso_max_pkt_len; uint32_t tx_tso_min_pkt_len; uint64_t tx_pkts[QLNX_FP_MAX_SEGS]; #ifdef QLNX_TRACE_PERF_DATA uint64_t tx_pkts_hist[QLNX_FP_MAX_SEGS]; uint64_t tx_comInt[QLNX_FP_MAX_SEGS]; uint64_t tx_pkts_q[QLNX_FP_MAX_SEGS]; #endif uint64_t err_tx_nsegs_gt_elem_left; uint64_t err_tx_dmamap_create; uint64_t err_tx_defrag_dmamap_load; uint64_t err_tx_non_tso_max_seg; uint64_t err_tx_dmamap_load; uint64_t err_tx_defrag; uint64_t err_tx_free_pkt_null; uint64_t err_tx_cons_idx_conflict; uint64_t lro_cnt_64; uint64_t lro_cnt_128; uint64_t lro_cnt_256; uint64_t lro_cnt_512; uint64_t lro_cnt_1024; /* receive statistics */ uint64_t rx_pkts; uint64_t tpa_start; uint64_t tpa_cont; uint64_t tpa_end; uint64_t err_m_getcl; uint64_t err_m_getjcl; uint64_t err_rx_hw_errors; uint64_t err_rx_alloc_errors; uint64_t err_rx_jumbo_chain_pkts; uint64_t err_rx_mp_null; uint64_t err_rx_tpa_invalid_agg_num; }; struct qlnx_update_vport_params { uint8_t vport_id; uint8_t update_vport_active_rx_flg; uint8_t vport_active_rx_flg; uint8_t update_vport_active_tx_flg; uint8_t vport_active_tx_flg; uint8_t update_inner_vlan_removal_flg; uint8_t inner_vlan_removal_flg; struct ecore_rss_params *rss_params; struct ecore_sge_tpa_params *sge_tpa_params; }; /* * link related */ struct qlnx_link_output { bool link_up; uint32_t supported_caps; uint32_t advertised_caps; uint32_t link_partner_caps; uint32_t speed; /* In Mb/s */ bool autoneg; uint32_t media_type; uint32_t duplex; }; typedef struct qlnx_link_output qlnx_link_output_t; #define QLNX_LINK_DUPLEX 0x0001 #define QLNX_LINK_CAP_FIBRE 0x0001 #define QLNX_LINK_CAP_Autoneg 0x0002 #define QLNX_LINK_CAP_Pause 0x0004 #define QLNX_LINK_CAP_Asym_Pause 0x0008 #define QLNX_LINK_CAP_1000baseT_Half 0x0010 #define QLNX_LINK_CAP_1000baseT_Full 0x0020 #define QLNX_LINK_CAP_10000baseKR_Full 0x0040 #define QLNX_LINK_CAP_25000baseKR_Full 0x0080 #define QLNX_LINK_CAP_40000baseLR4_Full 0x0100 #define QLNX_LINK_CAP_50000baseKR2_Full 0x0200 #define QLNX_LINK_CAP_100000baseKR4_Full 0x0400 /* Functions definition */ #define XMIT_PLAIN 0 #define XMIT_L4_CSUM (1 << 0) #define XMIT_LSO (1 << 1) #define CQE_FLAGS_ERR (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << \ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT | \ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << \ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT | \ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) #define RX_COPY_THRESH 92 #define ETH_MAX_PACKET_SIZE 1500 #define QLNX_MFW_VERSION_LENGTH 32 #define QLNX_STORMFW_VERSION_LENGTH 32 #define QLNX_TX_ELEM_RESERVE 2 #define QLNX_TX_ELEM_THRESH 128 #define QLNX_TX_ELEM_MAX_THRESH 512 #define QLNX_TX_ELEM_MIN_THRESH 32 #define QLNX_TX_COMPL_THRESH 32 #define QLNX_TPA_MAX_AGG_BUFFERS (20) #define QLNX_MAX_NUM_MULTICAST_ADDRS ECORE_MAX_MC_ADDRS typedef struct _qlnx_mcast { uint16_t rsrvd; uint8_t addr[6]; } __packed qlnx_mcast_t; typedef struct _qlnx_vf_attr { uint8_t mac_addr[ETHER_ADDR_LEN]; uint32_t num_rings; } qlnx_vf_attr_t; typedef struct _qlnx_sriov_task { struct task pf_task; struct taskqueue *pf_taskqueue; #define QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG 0x01 #define 
QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE 0x02 #define QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE 0x04 volatile uint32_t flags; } qlnx_sriov_task_t; /* * Adapter structure contains the hardware independent information of the * pci function. */ struct qlnx_host { /* interface to ecore */ struct ecore_dev cdev; uint32_t state; /* some flags */ volatile struct { volatile uint32_t hw_init :1, callout_init :1, slowpath_start :1, parent_tag :1, lock_init :1; } flags; /* interface to o.s */ device_t pci_dev; uint8_t pci_func; uint8_t dev_unit; uint16_t device_id; if_t ifp; int if_flags; volatile int link_up; struct ifmedia media; uint16_t max_frame_size; struct cdev *ioctl_dev; /* resources */ struct resource *pci_reg; int reg_rid; struct resource *pci_dbells; int dbells_rid; uint64_t dbells_phys_addr; uint32_t dbells_size; struct resource *msix_bar; int msix_rid; int msix_count; - struct mtx hw_lock; + struct sx hw_lock; /* debug */ uint32_t dbg_level; uint32_t dbg_trace_lro_cnt; uint32_t dbg_trace_tso_pkt_len; uint32_t dp_level; uint32_t dp_module; /* misc */ uint8_t mfw_ver[QLNX_MFW_VERSION_LENGTH]; uint8_t stormfw_ver[QLNX_STORMFW_VERSION_LENGTH]; uint32_t flash_size; /* dma related */ bus_dma_tag_t parent_tag; bus_dma_tag_t tx_tag; bus_dma_tag_t rx_tag; struct ecore_sb_info sb_array[QLNX_MAX_RSS]; struct qlnx_rx_queue rxq_array[QLNX_MAX_RSS]; struct qlnx_tx_queue txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)]; struct qlnx_fastpath fp_array[QLNX_MAX_RSS]; /* tx related */ struct callout tx_callout; uint32_t txr_idx; /* rx related */ uint32_t rx_pkt_threshold; uint32_t rx_jumbo_buf_eq_mtu; /* slow path related */ struct resource *sp_irq[MAX_HWFNS_PER_DEVICE]; void *sp_handle[MAX_HWFNS_PER_DEVICE]; int sp_irq_rid[MAX_HWFNS_PER_DEVICE]; struct task sp_task[MAX_HWFNS_PER_DEVICE]; struct taskqueue *sp_taskqueue[MAX_HWFNS_PER_DEVICE]; struct callout qlnx_callout; /* fast path related */ int num_rss; int num_tc; #define QLNX_MAX_TSS_CNT(ha) ((ha->num_rss) * (ha->num_tc)) qlnx_ivec_t irq_vec[QLNX_MAX_RSS]; uint8_t filter; uint32_t nmcast; qlnx_mcast_t mcast[QLNX_MAX_NUM_MULTICAST_ADDRS]; struct ecore_filter_mcast ecore_mcast; uint8_t primary_mac[ETH_ALEN]; uint8_t prio_to_tc[MAX_NUM_PRI]; struct ecore_eth_stats hw_stats; struct ecore_rss_params rss_params; uint32_t rx_buf_size; bool rx_csum_offload; uint32_t rx_coalesce_usecs; uint32_t tx_coalesce_usecs; /* link related */ qlnx_link_output_t if_link; /* global counters */ uint64_t sp_interrupts; uint64_t err_illegal_intr; uint64_t err_fp_null; uint64_t err_get_proto_invalid_type; /* error recovery related */ uint32_t error_recovery; struct task err_task; struct taskqueue *err_taskqueue; /* grcdump related */ uint32_t err_inject; uint32_t grcdump_taken; uint32_t grcdump_dwords[QLNX_MAX_HW_FUNCS]; uint32_t grcdump_size[QLNX_MAX_HW_FUNCS]; void *grcdump[QLNX_MAX_HW_FUNCS]; uint32_t idle_chk_taken; uint32_t idle_chk_dwords[QLNX_MAX_HW_FUNCS]; uint32_t idle_chk_size[QLNX_MAX_HW_FUNCS]; void *idle_chk[QLNX_MAX_HW_FUNCS]; /* storm stats related */ #define QLNX_STORM_STATS_TOTAL \ (QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN) qlnx_storm_stats_t storm_stats[QLNX_STORM_STATS_TOTAL]; uint32_t storm_stats_index; uint32_t storm_stats_enable; uint32_t storm_stats_gather; uint32_t personality; uint16_t sriov_initialized; uint16_t num_vfs; qlnx_vf_attr_t *vf_attr; qlnx_sriov_task_t sriov_task[MAX_HWFNS_PER_DEVICE]; uint32_t curr_vf; void *next; void *qlnx_rdma; volatile int qlnxr_debug; }; typedef struct qlnx_host qlnx_host_t; /* note that align has to be a power of 2 */ 
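The power-of-two note above deserves a concrete illustration before the macro itself: QL_ALIGN() (defined next) rounds size up by adding align - 1 and masking with ~(align - 1), which only lands on a multiple of align when align - 1 is a contiguous run of low bits. A small, hypothetical compile-time check of that arithmetic, assuming a C11 _Static_assert is available:

/* Worked example of the rounding used by QL_ALIGN() below:
 *   (100 + 63) & ~63 = 163 & ~63 = 128  -> next multiple of 64
 *   (128 + 63) & ~63 = 191 & ~63 = 128  -> already 64-aligned, unchanged
 * With a non-power-of-two align, ~(align - 1) is not a clean low-bit mask
 * and the expression no longer rounds to a multiple of align. */
_Static_assert(((100 + (64 - 1)) & ~(64 - 1)) == 128,
    "100 rounds up to the next multiple of 64");
_Static_assert(((128 + (64 - 1)) & ~(64 - 1)) == 128,
    "128 is already 64-aligned");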
#define QL_ALIGN(size, align) (((size) + ((align) - 1)) & (~((align) - 1))); #define QL_MIN(x, y) ((x < y) ? x : y) #define QL_RUNNING(ifp) \ ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \ IFF_DRV_RUNNING) #define QLNX_MAX_MTU 9000 #define QLNX_MAX_SEGMENTS_NON_TSO (ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1) //#define QLNX_MAX_TSO_FRAME_SIZE ((64 * 1024 - 1) + 22) #define QLNX_MAX_TSO_FRAME_SIZE 65536 #define QLNX_MAX_TX_MBUF_SIZE 65536 /* bytes - bd_len = 16bits */ #define QL_MAC_CMP(mac1, mac2) \ ((((*(uint32_t *) mac1) == (*(uint32_t *) mac2) && \ (*(uint16_t *)(mac1 + 4)) == (*(uint16_t *)(mac2 + 4)))) ? 0 : 1) #define for_each_rss(i) for (i = 0; i < ha->num_rss; i++) /* * Debug Related */ #ifdef QLNX_DEBUG #define QL_DPRINT1(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0001) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT2(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0002) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT3(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0004) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT4(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0008) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT5(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0010) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT6(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0020) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT7(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0040) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT8(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0080) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT9(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0100) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT11(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0400) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT12(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x0800) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #define QL_DPRINT13(ha, x, ...) \ do { \ if ((ha)->dbg_level & 0x1000) { \ device_printf ((ha)->pci_dev, \ "[%s:%d]" x, \ __func__, __LINE__, \ ## __VA_ARGS__); \ } \ } while (0) #else #define QL_DPRINT1(ha, x, ...) #define QL_DPRINT2(ha, x, ...) #define QL_DPRINT3(ha, x, ...) #define QL_DPRINT4(ha, x, ...) #define QL_DPRINT5(ha, x, ...) #define QL_DPRINT6(ha, x, ...) #define QL_DPRINT7(ha, x, ...) #define QL_DPRINT8(ha, x, ...) #define QL_DPRINT9(ha, x, ...) #define QL_DPRINT11(ha, x, ...) #define QL_DPRINT12(ha, x, ...) #define QL_DPRINT13(ha, x, ...) 
#endif /* #ifdef QLNX_DEBUG */ #define QL_ASSERT(ha, x, y) if (!x) panic y #define QL_ERR_INJECT(ha, val) (ha->err_inject == val) #define QL_RESET_ERR_INJECT(ha, val) {if (ha->err_inject == val) ha->err_inject = 0;} #define QL_ERR_INJCT_TX_INT_DIFF 0x0001 #define QL_ERR_INJCT_TX_INT_MBUF_NULL 0x0002 /* * exported functions */ extern int qlnx_make_cdev(qlnx_host_t *ha); extern void qlnx_del_cdev(qlnx_host_t *ha); extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index); extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index); extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha); extern void qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link); extern int qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs); extern int qlnx_vf_device(qlnx_host_t *ha); extern void qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info); extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id); /* * Some OS specific stuff */ #if (defined IFM_100G_SR4) #define QLNX_IFM_100G_SR4 IFM_100G_SR4 #define QLNX_IFM_100G_LR4 IFM_100G_LR4 #define QLNX_IFM_100G_CR4 IFM_100G_CR4 #else #define QLNX_IFM_100G_SR4 IFM_UNKNOWN #define QLNX_IFM_100G_LR4 IFM_UNKNOWN #endif /* #if (defined IFM_100G_SR4) */ #if (defined IFM_25G_SR) #define QLNX_IFM_25G_SR IFM_25G_SR #define QLNX_IFM_25G_CR IFM_25G_CR #else #define QLNX_IFM_25G_SR IFM_UNKNOWN #define QLNX_IFM_25G_CR IFM_UNKNOWN #endif /* #if (defined IFM_25G_SR) */ #define QLNX_INC_IERRORS(ifp) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1) #define QLNX_INC_IQDROPS(ifp) if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1) #define QLNX_INC_IPACKETS(ifp) if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1) #define QLNX_INC_OPACKETS(ifp) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1) #define QLNX_INC_OBYTES(ifp, len) \ if_inc_counter(ifp, IFCOUNTER_OBYTES, len) #define QLNX_INC_IBYTES(ifp, len) \ if_inc_counter(ha->ifp, IFCOUNTER_IBYTES, len) #define CQE_L3_PACKET(flags) \ ((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv4) || \ (((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv6)) #define CQE_IP_HDR_ERR(flags) \ ((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \ << PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT)) #define CQE_L4_HAS_CSUM(flags) \ ((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \ << PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) #define CQE_HAS_VLAN(flags) \ ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \ << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) #ifndef QLNX_RDMA #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define prefetch(x) #endif #endif #endif /* #ifndef _QLNX_DEF_H_ */ diff --git a/sys/dev/qlnx/qlnxe/qlnx_os.c b/sys/dev/qlnx/qlnxe/qlnx_os.c index de64aaef1b4c..2b3732e748fd 100644 --- a/sys/dev/qlnx/qlnxe/qlnx_os.c +++ b/sys/dev/qlnx/qlnxe/qlnx_os.c @@ -1,8361 +1,8362 @@ /* * Copyright (c) 2017-2018 Cavium, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: qlnx_os.c * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. */ #include #include "qlnx_os.h" #include "bcm_osal.h" #include "reg_addr.h" #include "ecore_gtt_reg_addr.h" #include "ecore.h" #include "ecore_chain.h" #include "ecore_status.h" #include "ecore_hw.h" #include "ecore_rt_defs.h" #include "ecore_init_ops.h" #include "ecore_int.h" #include "ecore_cxt.h" #include "ecore_spq.h" #include "ecore_init_fw_funcs.h" #include "ecore_sp_commands.h" #include "ecore_dev_api.h" #include "ecore_l2_api.h" #include "ecore_mcp.h" #include "ecore_hw_defs.h" #include "mcp_public.h" #include "ecore_iro.h" #include "nvm_cfg.h" #include "ecore_dbg_fw_funcs.h" #include "ecore_iov_api.h" #include "ecore_vf_api.h" #include "qlnx_ioctl.h" #include "qlnx_def.h" #include "qlnx_ver.h" #ifdef QLNX_ENABLE_IWARP #include "qlnx_rdma.h" #endif /* #ifdef QLNX_ENABLE_IWARP */ #ifdef CONFIG_ECORE_SRIOV #include #include #include #endif /* #ifdef CONFIG_ECORE_SRIOV */ #include /* * static functions */ /* * ioctl related functions */ static void qlnx_add_sysctls(qlnx_host_t *ha); /* * main driver */ static void qlnx_release(qlnx_host_t *ha); static void qlnx_fp_isr(void *arg); static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); static void qlnx_init(void *arg); static void qlnx_init_locked(qlnx_host_t *ha); static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); static int qlnx_set_promisc(qlnx_host_t *ha, int enabled); static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled); static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data); static int qlnx_media_change(if_t ifp); static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr); static void qlnx_stop(qlnx_host_t *ha); static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp); static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); static uint32_t qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link); static int qlnx_transmit(if_t ifp, struct mbuf *mp); static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp); static void qlnx_qflush(if_t ifp); static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t 
*flash_size); static int qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params); static int qlnx_nic_start(struct ecore_dev *cdev); static int qlnx_slowpath_start(qlnx_host_t *ha); static int qlnx_slowpath_stop(qlnx_host_t *ha); static int qlnx_init_hw(qlnx_host_t *ha); static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], char ver_str[VER_SIZE]); static void qlnx_unload(qlnx_host_t *ha); static int qlnx_load(qlnx_host_t *ha); static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, uint32_t add_mac); static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len); static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq); static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index); static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index); static void qlnx_timer(void *arg); static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); static void qlnx_trigger_dump(qlnx_host_t *ha); static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_tx_queue *txq); static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_tx_queue *txq); static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, int lro_enable); static void qlnx_fp_taskqueue(void *context, int pending); static void qlnx_sample_storm_stats(qlnx_host_t *ha); static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, struct qlnx_agg_info *tpa); static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); /* * Hooks to the Operating Systems */ static int qlnx_pci_probe (device_t); static int qlnx_pci_attach (device_t); static int qlnx_pci_detach (device_t); #ifndef QLNX_VF #ifdef CONFIG_ECORE_SRIOV static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); static void qlnx_iov_uninit(device_t dev); static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); static void qlnx_initialize_sriov(qlnx_host_t *ha); static void qlnx_pf_taskqueue(void *context, int pending); static int qlnx_create_pf_taskqueues(qlnx_host_t *ha); static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha); static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha); #endif /* #ifdef CONFIG_ECORE_SRIOV */ static device_method_t qlnx_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qlnx_pci_probe), DEVMETHOD(device_attach, qlnx_pci_attach), DEVMETHOD(device_detach, qlnx_pci_detach), #ifdef CONFIG_ECORE_SRIOV DEVMETHOD(pci_iov_init, qlnx_iov_init), DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit), DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf), #endif /* #ifdef CONFIG_ECORE_SRIOV */ { 0, 0 } }; static driver_t qlnx_pci_driver = { "ql", qlnx_pci_methods, sizeof (qlnx_host_t), }; MODULE_VERSION(if_qlnxe,1); DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0); MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1); MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1); #else static device_method_t qlnxv_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qlnx_pci_probe), DEVMETHOD(device_attach, qlnx_pci_attach), 
DEVMETHOD(device_detach, qlnx_pci_detach), { 0, 0 } }; static driver_t qlnxv_pci_driver = { "ql", qlnxv_pci_methods, sizeof (qlnx_host_t), }; MODULE_VERSION(if_qlnxev,1); DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0); MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1); MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1); #endif /* #ifdef QLNX_VF */ MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver"); char qlnx_dev_str[128]; char qlnx_ver_str[VER_SIZE]; char qlnx_name_str[NAME_SIZE]; /* * Some PCI Configuration Space Related Defines */ #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif /* 40G Adapter QLE45xxx*/ #ifndef QLOGIC_PCI_DEVICE_ID_1634 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634 #endif /* 100G Adapter QLE45xxx*/ #ifndef QLOGIC_PCI_DEVICE_ID_1644 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644 #endif /* 25G Adapter QLE45xxx*/ #ifndef QLOGIC_PCI_DEVICE_ID_1656 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656 #endif /* 50G Adapter QLE45xxx*/ #ifndef QLOGIC_PCI_DEVICE_ID_1654 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654 #endif /* 10G/25G/40G Adapter QLE41xxx*/ #ifndef QLOGIC_PCI_DEVICE_ID_8070 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070 #endif /* SRIOV Device (All Speeds) Adapter QLE41xxx*/ #ifndef QLOGIC_PCI_DEVICE_ID_8090 #define QLOGIC_PCI_DEVICE_ID_8090 0x8090 #endif SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "qlnxe driver parameters"); /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */ static int qlnxe_queue_count = QLNX_DEFAULT_RSS; SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, &qlnxe_queue_count, 0, "Multi-Queue queue count"); /* * Note on RDMA personality setting * * Read the personality configured in NVRAM * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT * use the personality in NVRAM. * Otherwise use t the personality configured in sysctl. 
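To make the per-function encoding of qlnxe_rdma_configuration concrete: each physical function gets a 4-bit field (the QLNX_PERSONALITY_* defines that follow), so the default 0x22222222 requests the ETH_IWARP override (0x2) for the first eight functions and leaves the remaining nibbles at 0x0 (use the NVRAM personality). The helper below is hypothetical; the shift-and-mask is an assumption based on those defines rather than a copy of the driver's own lookup.

/* Hypothetical helper: extract the 4-bit personality override for a given
 * PCI function from the 64-bit rdma_configuration tunable. Width and mask
 * follow QLNX_PERSONALITY_BITS_PER_FUNC (4) and QLNX_PERSONALIY_MASK (0xF)
 * defined below; a result of 0x0 means "use the personality in NVRAM". */
static uint32_t
demo_rdma_personality(uint64_t rdma_configuration, uint8_t pci_func)
{
	return ((uint32_t)((rdma_configuration >>
	    (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
	    QLNX_PERSONALIY_MASK));
}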
* */ #define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */ #define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */ #define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */ #define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */ #define QLNX_PERSONALITY_BITS_PER_FUNC 4 #define QLNX_PERSONALIY_MASK 0xF /* RDMA configuration; 64bit field allows setting for 16 physical functions*/ static uint64_t qlnxe_rdma_configuration = 0x22222222; SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN, &qlnxe_rdma_configuration, 0, "RDMA Configuration"); int qlnx_vf_device(qlnx_host_t *ha) { uint16_t device_id; device_id = ha->device_id; if (device_id == QLOGIC_PCI_DEVICE_ID_8090) return 0; return -1; } static int qlnx_valid_device(qlnx_host_t *ha) { uint16_t device_id; device_id = ha->device_id; #ifndef QLNX_VF if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || (device_id == QLOGIC_PCI_DEVICE_ID_1644) || (device_id == QLOGIC_PCI_DEVICE_ID_1656) || (device_id == QLOGIC_PCI_DEVICE_ID_1654) || (device_id == QLOGIC_PCI_DEVICE_ID_8070)) return 0; #else if (device_id == QLOGIC_PCI_DEVICE_ID_8090) return 0; #endif /* #ifndef QLNX_VF */ return -1; } #ifdef QLNX_ENABLE_IWARP static int qlnx_rdma_supported(struct qlnx_host *ha) { uint16_t device_id; device_id = pci_get_device(ha->pci_dev); if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || (device_id == QLOGIC_PCI_DEVICE_ID_1656) || (device_id == QLOGIC_PCI_DEVICE_ID_1654) || (device_id == QLOGIC_PCI_DEVICE_ID_8070)) return (0); return (-1); } #endif /* #ifdef QLNX_ENABLE_IWARP */ /* * Name: qlnx_pci_probe * Function: Validate the PCI device to be a QLA80XX device */ static int qlnx_pci_probe(device_t dev) { snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { return (ENXIO); } switch (pci_get_device(dev)) { #ifndef QLNX_VF case QLOGIC_PCI_DEVICE_ID_1644: snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", "Qlogic 100GbE PCI CNA Adapter-Ethernet Function", QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); device_set_desc_copy(dev, qlnx_dev_str); break; case QLOGIC_PCI_DEVICE_ID_1634: snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", "Qlogic 40GbE PCI CNA Adapter-Ethernet Function", QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); device_set_desc_copy(dev, qlnx_dev_str); break; case QLOGIC_PCI_DEVICE_ID_1656: snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", "Qlogic 25GbE PCI CNA Adapter-Ethernet Function", QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); device_set_desc_copy(dev, qlnx_dev_str); break; case QLOGIC_PCI_DEVICE_ID_1654: snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); device_set_desc_copy(dev, qlnx_dev_str); break; case QLOGIC_PCI_DEVICE_ID_8070: snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)" " Adapter-Ethernet Function", QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); device_set_desc_copy(dev, qlnx_dev_str); break; #else case QLOGIC_PCI_DEVICE_ID_8090: snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", "Qlogic SRIOV PCI CNA (AH) " "Adapter-Ethernet Function", QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); device_set_desc_copy(dev, qlnx_dev_str); break; 
#endif /* #ifndef QLNX_VF */ default: return (ENXIO); } #ifdef QLNX_ENABLE_IWARP qlnx_rdma_init(); #endif /* #ifdef QLNX_ENABLE_IWARP */ return (BUS_PROBE_DEFAULT); } static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_tx_queue *txq) { u16 hw_bd_cons; u16 ecore_cons_idx; hw_bd_cons = le16toh(*txq->hw_cons_ptr); ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); return (hw_bd_cons - ecore_cons_idx); } static void qlnx_sp_intr(void *arg) { struct ecore_hwfn *p_hwfn; qlnx_host_t *ha; int i; p_hwfn = arg; if (p_hwfn == NULL) { printf("%s: spurious slowpath intr\n", __func__); return; } ha = (qlnx_host_t *)p_hwfn->p_dev; QL_DPRINT2(ha, "enter\n"); for (i = 0; i < ha->cdev.num_hwfns; i++) { if (&ha->cdev.hwfns[i] == p_hwfn) { taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); break; } } QL_DPRINT2(ha, "exit\n"); return; } static void qlnx_sp_taskqueue(void *context, int pending) { struct ecore_hwfn *p_hwfn; p_hwfn = context; if (p_hwfn != NULL) { qlnx_sp_isr(p_hwfn); } return; } static int qlnx_create_sp_taskqueues(qlnx_host_t *ha) { int i; uint8_t tq_name[32]; for (i = 0; i < ha->cdev.num_hwfns; i++) { struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; bzero(tq_name, sizeof (tq_name)); snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT, taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); if (ha->sp_taskqueue[i] == NULL) return (-1); taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", tq_name); QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); } return (0); } static void qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) { int i; for (i = 0; i < ha->cdev.num_hwfns; i++) { if (ha->sp_taskqueue[i] != NULL) { taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); taskqueue_free(ha->sp_taskqueue[i]); } } return; } static void qlnx_fp_taskqueue(void *context, int pending) { struct qlnx_fastpath *fp; qlnx_host_t *ha; if_t ifp; fp = context; if (fp == NULL) return; ha = (qlnx_host_t *)fp->edev; ifp = ha->ifp; if(if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if (!drbr_empty(ifp, fp->tx_br)) { if(mtx_trylock(&fp->tx_mtx)) { #ifdef QLNX_TRACE_PERF_DATA tx_pkts = fp->tx_pkts_transmitted; tx_compl = fp->tx_pkts_completed; #endif qlnx_transmit_locked(ifp, fp, NULL); #ifdef QLNX_TRACE_PERF_DATA fp->tx_pkts_trans_fp += (fp->tx_pkts_transmitted - tx_pkts); fp->tx_pkts_compl_fp += (fp->tx_pkts_completed - tx_compl); #endif mtx_unlock(&fp->tx_mtx); } } } QL_DPRINT2(ha, "exit \n"); return; } static int qlnx_create_fp_taskqueues(qlnx_host_t *ha) { int i; uint8_t tq_name[32]; struct qlnx_fastpath *fp; for (i = 0; i < ha->num_rss; i++) { fp = &ha->fp_array[i]; bzero(tq_name, sizeof (tq_name)); snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT, taskqueue_thread_enqueue, &fp->fp_taskqueue); if (fp->fp_taskqueue == NULL) return (-1); taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", tq_name); QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue); } return (0); } static void qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) { int i; struct qlnx_fastpath *fp; for (i = 0; i < ha->num_rss; i++) { fp = &ha->fp_array[i]; if (fp->fp_taskqueue != NULL) { taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); taskqueue_free(fp->fp_taskqueue); fp->fp_taskqueue = NULL; } } return; } static void qlnx_drain_fp_taskqueues(qlnx_host_t *ha) { int i; struct qlnx_fastpath *fp; for (i = 0; i < 
ha->num_rss; i++) { fp = &ha->fp_array[i]; if (fp->fp_taskqueue != NULL) { QLNX_UNLOCK(ha); taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); QLNX_LOCK(ha); } } return; } static void qlnx_get_params(qlnx_host_t *ha) { if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) { device_printf(ha->pci_dev, "invalid queue_count value (%d)\n", qlnxe_queue_count); qlnxe_queue_count = 0; } return; } static void qlnx_error_recovery_taskqueue(void *context, int pending) { qlnx_host_t *ha; ha = context; QL_DPRINT2(ha, "enter\n"); QLNX_LOCK(ha); qlnx_stop(ha); QLNX_UNLOCK(ha); #ifdef QLNX_ENABLE_IWARP qlnx_rdma_dev_remove(ha); #endif /* #ifdef QLNX_ENABLE_IWARP */ qlnx_slowpath_stop(ha); qlnx_slowpath_start(ha); #ifdef QLNX_ENABLE_IWARP qlnx_rdma_dev_add(ha); #endif /* #ifdef QLNX_ENABLE_IWARP */ qlnx_init(ha); callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); QL_DPRINT2(ha, "exit\n"); return; } static int qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha) { uint8_t tq_name[32]; bzero(tq_name, sizeof (tq_name)); snprintf(tq_name, sizeof (tq_name), "ql_err_tq"); TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha); ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT, taskqueue_thread_enqueue, &ha->err_taskqueue); if (ha->err_taskqueue == NULL) return (-1); taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name); QL_DPRINT1(ha, "%p\n",ha->err_taskqueue); return (0); } static void qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha) { if (ha->err_taskqueue != NULL) { taskqueue_drain(ha->err_taskqueue, &ha->err_task); taskqueue_free(ha->err_taskqueue); } ha->err_taskqueue = NULL; return; } /* * Name: qlnx_pci_attach * Function: attaches the device to the operating system */ static int qlnx_pci_attach(device_t dev) { qlnx_host_t *ha = NULL; uint32_t rsrc_len_reg __unused = 0; uint32_t rsrc_len_dbells = 0; uint32_t rsrc_len_msix __unused = 0; int i; uint32_t mfw_ver; uint32_t num_sp_msix = 0; uint32_t num_rdma_irqs = 0; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(ha, 0, sizeof (qlnx_host_t)); ha->device_id = pci_get_device(dev); if (qlnx_valid_device(ha) != 0) { device_printf(dev, "device is not valid device\n"); return (ENXIO); } ha->pci_func = pci_get_function(dev); ha->pci_dev = dev; - mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); + sx_init(&ha->hw_lock, "qlnx_hw_lock"); ha->flags.lock_init = 1; pci_enable_busmaster(dev); /* * map the PCI BARs */ ha->reg_rid = PCIR_BAR(0); ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, RF_ACTIVE); if (ha->pci_reg == NULL) { device_printf(dev, "unable to map BAR0\n"); goto qlnx_pci_attach_err; } rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, ha->reg_rid); ha->dbells_rid = PCIR_BAR(2); rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, ha->dbells_rid); if (rsrc_len_dbells) { ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->dbells_rid, RF_ACTIVE); if (ha->pci_dbells == NULL) { device_printf(dev, "unable to map BAR1\n"); goto qlnx_pci_attach_err; } ha->dbells_phys_addr = (uint64_t) bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid); ha->dbells_size = rsrc_len_dbells; } else { if (qlnx_vf_device(ha) != 0) { device_printf(dev, " BAR1 size is zero\n"); goto qlnx_pci_attach_err; } } ha->msix_rid = PCIR_BAR(4); ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->msix_rid, RF_ACTIVE); if (ha->msix_bar == NULL) { device_printf(dev, "unable to 
map BAR2\n"); goto qlnx_pci_attach_err; } rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, ha->msix_rid); ha->dbg_level = 0x0000; QL_DPRINT1(ha, "\n\t\t\t" "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" " msix_avail = 0x%x " "\n\t\t\t[ncpus = %d]\n", ha->pci_dev, ha->pci_reg, rsrc_len_reg, ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), mp_ncpus); /* * allocate dma tags */ if (qlnx_alloc_parent_dma_tag(ha)) goto qlnx_pci_attach_err; if (qlnx_alloc_tx_dma_tag(ha)) goto qlnx_pci_attach_err; if (qlnx_alloc_rx_dma_tag(ha)) goto qlnx_pci_attach_err; if (qlnx_init_hw(ha) != 0) goto qlnx_pci_attach_err; ha->flags.hw_init = 1; qlnx_get_params(ha); if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) && (qlnxe_queue_count == QLNX_DEFAULT_RSS)) { qlnxe_queue_count = QLNX_MAX_RSS; } /* * Allocate MSI-x vectors */ if (qlnx_vf_device(ha) != 0) { if (qlnxe_queue_count == 0) ha->num_rss = QLNX_DEFAULT_RSS; else ha->num_rss = qlnxe_queue_count; num_sp_msix = ha->cdev.num_hwfns; } else { uint8_t max_rxq; uint8_t max_txq; ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq); ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq); if (max_rxq < max_txq) ha->num_rss = max_rxq; else ha->num_rss = max_txq; if (ha->num_rss > QLNX_MAX_VF_RSS) ha->num_rss = QLNX_MAX_VF_RSS; num_sp_msix = 0; } if (ha->num_rss > mp_ncpus) ha->num_rss = mp_ncpus; ha->num_tc = QLNX_MAX_TC; ha->msix_count = pci_msix_count(dev); #ifdef QLNX_ENABLE_IWARP num_rdma_irqs = qlnx_rdma_get_num_irqs(ha); #endif /* #ifdef QLNX_ENABLE_IWARP */ if (!ha->msix_count || (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) { device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, ha->msix_count); goto qlnx_pci_attach_err; } if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs)) ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs; else ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs); QL_DPRINT1(ha, "\n\t\t\t" "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" " msix_avail = 0x%x msix_alloc = 0x%x" "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", ha->pci_reg, rsrc_len_reg, ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc); if (pci_alloc_msix(dev, &ha->msix_count)) { device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, ha->msix_count); ha->msix_count = 0; goto qlnx_pci_attach_err; } /* * Initialize slow path interrupt and task queue */ if (num_sp_msix) { if (qlnx_create_sp_taskqueues(ha) != 0) goto qlnx_pci_attach_err; for (i = 0; i < ha->cdev.num_hwfns; i++) { struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; ha->sp_irq_rid[i] = i + 1; ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->sp_irq_rid[i], (RF_ACTIVE | RF_SHAREABLE)); if (ha->sp_irq[i] == NULL) { device_printf(dev, "could not allocate mbx interrupt\n"); goto qlnx_pci_attach_err; } if (bus_setup_intr(dev, ha->sp_irq[i], (INTR_TYPE_NET | INTR_MPSAFE), NULL, qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { device_printf(dev, "could not setup slow path interrupt\n"); goto qlnx_pci_attach_err; } QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d" " sp_irq %p sp_handle 
%p\n", p_hwfn, ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]); } } /* * initialize fast path interrupt */ if (qlnx_create_fp_taskqueues(ha) != 0) goto qlnx_pci_attach_err; for (i = 0; i < ha->num_rss; i++) { ha->irq_vec[i].rss_idx = i; ha->irq_vec[i].ha = ha; ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i; ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->irq_vec[i].irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->irq_vec[i].irq == NULL) { device_printf(dev, "could not allocate interrupt[%d] irq_rid = %d\n", i, ha->irq_vec[i].irq_rid); goto qlnx_pci_attach_err; } if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { device_printf(dev, "could not allocate tx_br[%d]\n", i); goto qlnx_pci_attach_err; } } if (qlnx_vf_device(ha) != 0) { callout_init(&ha->qlnx_callout, 1); ha->flags.callout_init = 1; for (i = 0; i < ha->cdev.num_hwfns; i++) { if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) goto qlnx_pci_attach_err; if (ha->grcdump_size[i] == 0) goto qlnx_pci_attach_err; ha->grcdump_size[i] = ha->grcdump_size[i] << 2; QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", i, ha->grcdump_size[i]); ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); if (ha->grcdump[i] == NULL) { device_printf(dev, "grcdump alloc[%d] failed\n", i); goto qlnx_pci_attach_err; } if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) goto qlnx_pci_attach_err; if (ha->idle_chk_size[i] == 0) goto qlnx_pci_attach_err; ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", i, ha->idle_chk_size[i]); ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); if (ha->idle_chk[i] == NULL) { device_printf(dev, "idle_chk alloc failed\n"); goto qlnx_pci_attach_err; } } if (qlnx_create_error_recovery_taskqueue(ha) != 0) goto qlnx_pci_attach_err; } if (qlnx_slowpath_start(ha) != 0) goto qlnx_pci_attach_err; else ha->flags.slowpath_start = 1; if (qlnx_vf_device(ha) != 0) { if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { qlnx_mdelay(__func__, 1000); qlnx_trigger_dump(ha); goto qlnx_pci_attach_err0; } if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { qlnx_mdelay(__func__, 1000); qlnx_trigger_dump(ha); goto qlnx_pci_attach_err0; } } else { struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL); } snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, FW_ENGINEERING_VERSION); QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", ha->stormfw_ver, ha->mfw_ver); qlnx_init_ifnet(dev, ha); /* * add sysctls */ qlnx_add_sysctls(ha); qlnx_pci_attach_err0: /* * create ioctl device interface */ if (qlnx_vf_device(ha) != 0) { if (qlnx_make_cdev(ha)) { device_printf(dev, "%s: ql_make_cdev failed\n", __func__); goto qlnx_pci_attach_err; } #ifdef QLNX_ENABLE_IWARP qlnx_rdma_dev_add(ha); #endif /* #ifdef QLNX_ENABLE_IWARP */ } #ifndef QLNX_VF #ifdef CONFIG_ECORE_SRIOV if (qlnx_vf_device(ha) != 0) qlnx_initialize_sriov(ha); #endif /* #ifdef CONFIG_ECORE_SRIOV */ #endif /* #ifdef QLNX_VF */ QL_DPRINT2(ha, "success\n"); return (0); qlnx_pci_attach_err: qlnx_release(ha); return (ENXIO); } /* * Name: qlnx_pci_detach * Function: Unhooks the device from the operating system */ static int qlnx_pci_detach(device_t dev) { qlnx_host_t *ha = NULL; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "%s: cannot get softc\n", __func__); 
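/*
 * The attach path above reads the management firmware version as a single
 * packed 32-bit word and formats it into ha->mfw_ver as a dotted string,
 * one byte per field (major.minor.revision.engineering, major in the top
 * byte).  The fragment below is only an illustrative, hypothetical sketch
 * of that decode; qlnx_format_mfw_ver_example() is not a function in this
 * driver.
 */
#if 0   /* illustrative sketch, not driver code */
static void
qlnx_format_mfw_ver_example(uint32_t mfw_ver, char *buf, size_t len)
{
    /* Same shift/mask layout the attach path uses above. */
    snprintf(buf, len, "%d.%d.%d.%d",
        (int)((mfw_ver >> 24) & 0xFF), (int)((mfw_ver >> 16) & 0xFF),
        (int)((mfw_ver >> 8) & 0xFF), (int)(mfw_ver & 0xFF));
}
/* Example: 0x08212415 formats as "8.33.36.21". */
#endif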
return (ENOMEM); } if (qlnx_vf_device(ha) != 0) { #ifdef CONFIG_ECORE_SRIOV int ret; ret = pci_iov_detach(dev); if (ret) { device_printf(dev, "%s: SRIOV in use\n", __func__); return (ret); } #endif /* #ifdef CONFIG_ECORE_SRIOV */ #ifdef QLNX_ENABLE_IWARP if (qlnx_rdma_dev_remove(ha) != 0) return (EBUSY); #endif /* #ifdef QLNX_ENABLE_IWARP */ } QLNX_LOCK(ha); qlnx_stop(ha); QLNX_UNLOCK(ha); qlnx_release(ha); return (0); } #ifdef QLNX_ENABLE_IWARP static uint8_t qlnx_get_personality(uint8_t pci_func) { uint8_t personality; personality = (qlnxe_rdma_configuration >> (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) & QLNX_PERSONALIY_MASK; return (personality); } static void qlnx_set_personality(qlnx_host_t *ha) { uint8_t personality; personality = qlnx_get_personality(ha->pci_func); switch (personality) { case QLNX_PERSONALITY_DEFAULT: device_printf(ha->pci_dev, "%s: DEFAULT\n", __func__); ha->personality = ECORE_PCI_DEFAULT; break; case QLNX_PERSONALITY_ETH_ONLY: device_printf(ha->pci_dev, "%s: ETH_ONLY\n", __func__); ha->personality = ECORE_PCI_ETH; break; case QLNX_PERSONALITY_ETH_IWARP: device_printf(ha->pci_dev, "%s: ETH_IWARP\n", __func__); ha->personality = ECORE_PCI_ETH_IWARP; break; case QLNX_PERSONALITY_ETH_ROCE: device_printf(ha->pci_dev, "%s: ETH_ROCE\n", __func__); ha->personality = ECORE_PCI_ETH_ROCE; break; } return; } #endif /* #ifdef QLNX_ENABLE_IWARP */ static int qlnx_init_hw(qlnx_host_t *ha) { int rval = 0; struct ecore_hw_prepare_params params; + ha->cdev.ha = ha; ecore_init_struct(&ha->cdev); /* ha->dp_module = ECORE_MSG_PROBE | ECORE_MSG_INTR | ECORE_MSG_SP | ECORE_MSG_LINK | ECORE_MSG_SPQ | ECORE_MSG_RDMA; ha->dp_level = ECORE_LEVEL_VERBOSE;*/ //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2; ha->dp_level = ECORE_LEVEL_NOTICE; //ha->dp_level = ECORE_LEVEL_VERBOSE; ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); ha->cdev.regview = ha->pci_reg; ha->personality = ECORE_PCI_DEFAULT; if (qlnx_vf_device(ha) == 0) { ha->cdev.b_is_vf = true; if (ha->pci_dbells != NULL) { ha->cdev.doorbells = ha->pci_dbells; ha->cdev.db_phys_addr = ha->dbells_phys_addr; ha->cdev.db_size = ha->dbells_size; } else { ha->pci_dbells = ha->pci_reg; } } else { ha->cdev.doorbells = ha->pci_dbells; ha->cdev.db_phys_addr = ha->dbells_phys_addr; ha->cdev.db_size = ha->dbells_size; #ifdef QLNX_ENABLE_IWARP if (qlnx_rdma_supported(ha) == 0) qlnx_set_personality(ha); #endif /* #ifdef QLNX_ENABLE_IWARP */ } QL_DPRINT2(ha, "%s: %s\n", __func__, (ha->personality == ECORE_PCI_ETH_IWARP ? 
"iwarp": "ethernet")); bzero(¶ms, sizeof (struct ecore_hw_prepare_params)); params.personality = ha->personality; params.drv_resc_alloc = false; params.chk_reg_fifo = false; params.initiate_pf_flr = true; params.epoch = 0; ecore_hw_prepare(&ha->cdev, ¶ms); qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n", ha, &ha->cdev, &ha->cdev.hwfns[0]); return (rval); } static void qlnx_release(qlnx_host_t *ha) { device_t dev; int i; dev = ha->pci_dev; QL_DPRINT2(ha, "enter\n"); for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { if (ha->idle_chk[i] != NULL) { free(ha->idle_chk[i], M_QLNXBUF); ha->idle_chk[i] = NULL; } if (ha->grcdump[i] != NULL) { free(ha->grcdump[i], M_QLNXBUF); ha->grcdump[i] = NULL; } } if (ha->flags.callout_init) callout_drain(&ha->qlnx_callout); if (ha->flags.slowpath_start) { qlnx_slowpath_stop(ha); } if (ha->flags.hw_init) ecore_hw_remove(&ha->cdev); qlnx_del_cdev(ha); if (ha->ifp != NULL) ether_ifdetach(ha->ifp); qlnx_free_tx_dma_tag(ha); qlnx_free_rx_dma_tag(ha); qlnx_free_parent_dma_tag(ha); if (qlnx_vf_device(ha) != 0) { qlnx_destroy_error_recovery_taskqueue(ha); } for (i = 0; i < ha->num_rss; i++) { struct qlnx_fastpath *fp = &ha->fp_array[i]; if (ha->irq_vec[i].handle) { (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, ha->irq_vec[i].handle); } if (ha->irq_vec[i].irq) { (void)bus_release_resource(dev, SYS_RES_IRQ, ha->irq_vec[i].irq_rid, ha->irq_vec[i].irq); } qlnx_free_tx_br(ha, fp); } qlnx_destroy_fp_taskqueues(ha); for (i = 0; i < ha->cdev.num_hwfns; i++) { if (ha->sp_handle[i]) (void)bus_teardown_intr(dev, ha->sp_irq[i], ha->sp_handle[i]); if (ha->sp_irq[i]) (void) bus_release_resource(dev, SYS_RES_IRQ, ha->sp_irq_rid[i], ha->sp_irq[i]); } qlnx_destroy_sp_taskqueues(ha); if (ha->msix_count) pci_release_msi(dev); if (ha->flags.lock_init) { - mtx_destroy(&ha->hw_lock); + sx_destroy(&ha->hw_lock); } if (ha->pci_reg) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, ha->pci_reg); if (ha->dbells_size && ha->pci_dbells) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, ha->pci_dbells); if (ha->msix_bar) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, ha->msix_bar); QL_DPRINT2(ha, "exit\n"); return; } static void qlnx_trigger_dump(qlnx_host_t *ha) { int i; if (ha->ifp != NULL) if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); QL_DPRINT2(ha, "enter\n"); if (qlnx_vf_device(ha) == 0) return; ha->error_recovery = 1; for (i = 0; i < ha->cdev.num_hwfns; i++) { qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); } QL_DPRINT2(ha, "exit\n"); return; } static int qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qlnx_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qlnx_host_t *)arg1; qlnx_trigger_dump(ha); } return (err); } static int qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) { int err, i, ret = 0, usecs = 0; qlnx_host_t *ha; struct ecore_hwfn *p_hwfn; struct qlnx_fastpath *fp; err = sysctl_handle_int(oidp, &usecs, 0, req); if (err || !req->newptr || !usecs || (usecs > 255)) return (err); ha = (qlnx_host_t *)arg1; if (qlnx_vf_device(ha) == 0) return (-1); for (i = 0; i < ha->num_rss; i++) { p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; fp = &ha->fp_array[i]; if (fp->txq[0]->handle != NULL) { ret = ecore_set_queue_coalesce(p_hwfn, 0, (uint16_t)usecs, fp->txq[0]->handle); } } if (!ret) ha->tx_coalesce_usecs = (uint8_t)usecs; return (err); } static int 
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) { int err, i, ret = 0, usecs = 0; qlnx_host_t *ha; struct ecore_hwfn *p_hwfn; struct qlnx_fastpath *fp; err = sysctl_handle_int(oidp, &usecs, 0, req); if (err || !req->newptr || !usecs || (usecs > 255)) return (err); ha = (qlnx_host_t *)arg1; if (qlnx_vf_device(ha) == 0) return (-1); for (i = 0; i < ha->num_rss; i++) { p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; fp = &ha->fp_array[i]; if (fp->rxq->handle != NULL) { ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 0, fp->rxq->handle); } } if (!ret) ha->rx_coalesce_usecs = (uint8_t)usecs; return (err); } static void qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *ctx_oid; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat"); children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "sp_interrupts", CTLFLAG_RD, &ha->sp_interrupts, "No. of slowpath interrupts"); return; } static void qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i, j; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->num_rss; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); /* Tx Related */ SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_processed", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, "No. of packets processed for transmission"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_freed", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, "No. of freed packets"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_transmitted", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, "No. of transmitted packets"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_completed", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, "No. of transmit completions"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_non_tso_pkts", CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, "No. of non LSO transmited packets"); #ifdef QLNX_TRACE_PERF_DATA SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_trans_ctx", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, "No. of transmitted packets in transmit context"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_compl_ctx", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, "No. of transmit completions in transmit context"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_trans_fp", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, "No. of transmitted packets in taskqueue"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_compl_fp", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, "No. of transmit completions in taskqueue"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_pkts_compl_intr", CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, "No. 
of transmit completions in interrupt ctx"); #endif SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_tso_pkts", CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, "No. of LSO transmited packets"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_lso_wnd_min_len", CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, "tx_lso_wnd_min_len"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_defrag", CTLFLAG_RD, &ha->fp_array[i].tx_defrag, "tx_defrag"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tx_nsegs_gt_elem_left", CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, "tx_nsegs_gt_elem_left"); SYSCTL_ADD_UINT(ctx, node_children, OID_AUTO, "tx_tso_max_nsegs", CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); SYSCTL_ADD_UINT(ctx, node_children, OID_AUTO, "tx_tso_min_nsegs", CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); SYSCTL_ADD_UINT(ctx, node_children, OID_AUTO, "tx_tso_max_pkt_len", CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, ha->fp_array[i].tx_tso_max_pkt_len, "tx_tso_max_pkt_len"); SYSCTL_ADD_UINT(ctx, node_children, OID_AUTO, "tx_tso_min_pkt_len", CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, ha->fp_array[i].tx_tso_min_pkt_len, "tx_tso_min_pkt_len"); for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "tx_pkts_nseg_%02d", (j+1)); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, name_str, CTLFLAG_RD, &ha->fp_array[i].tx_pkts[j], name_str); } #ifdef QLNX_TRACE_PERF_DATA for (j = 0; j < 18; j++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "tx_pkts_hist_%02d", (j+1)); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, name_str, CTLFLAG_RD, &ha->fp_array[i].tx_pkts_hist[j], name_str); } for (j = 0; j < 5; j++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "tx_comInt_%02d", (j+1)); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, name_str, CTLFLAG_RD, &ha->fp_array[i].tx_comInt[j], name_str); } for (j = 0; j < 18; j++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "tx_pkts_q_%02d", (j+1)); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, name_str, CTLFLAG_RD, &ha->fp_array[i].tx_pkts_q[j], name_str); } #endif SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_tx_nsegs_gt_elem_left", CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, "err_tx_nsegs_gt_elem_left"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_tx_dmamap_create", CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, "err_tx_dmamap_create"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_tx_defrag_dmamap_load", CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, "err_tx_defrag_dmamap_load"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_tx_non_tso_max_seg", CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, "err_tx_non_tso_max_seg"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_tx_dmamap_load", CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, "err_tx_dmamap_load"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_tx_defrag", CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, "err_tx_defrag"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_tx_free_pkt_null", CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, "err_tx_free_pkt_null"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_tx_cons_idx_conflict", CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, "err_tx_cons_idx_conflict"); 
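/*
 * All of the per-queue counters registered in this loop follow the same
 * pattern: a "fpstat" node per device, one child node per RSS queue index,
 * and read-only 64-bit leaves added with SYSCTL_ADD_QUAD() that point
 * directly at the counters in struct qlnx_fastpath, so no handler function
 * is needed.  The fragment below sketches the shape of one such
 * registration for a hypothetical counter; "rx_example_drops" and
 * qlnx_add_one_fp_counter() do not exist in the driver and only illustrate
 * the pattern.
 */
#if 0   /* illustrative sketch, not driver code */
static void
qlnx_add_one_fp_counter(qlnx_host_t *ha, struct sysctl_oid_list *node_children,
    int i)
{
    struct sysctl_ctx_list *ctx;

    ctx = device_get_sysctl_ctx(ha->pci_dev);

    /* CTLFLAG_RD leaf that reads the 64-bit counter in place. */
    SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO,
        "rx_example_drops", CTLFLAG_RD,
        &ha->fp_array[i].rx_example_drops, /* hypothetical field */
        "No. of example drops");
}
#endif
/*
 * The existing leaves can be read with sysctl(8), e.g. something like
 * "sysctl dev.ql.0.fpstat.0.rx_pkts", assuming the device attaches as ql0.
 */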
SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_cnt_64", CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, "lro_cnt_64"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_cnt_128", CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, "lro_cnt_128"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_cnt_256", CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, "lro_cnt_256"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_cnt_512", CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, "lro_cnt_512"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_cnt_1024", CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, "lro_cnt_1024"); /* Rx Related */ SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "rx_pkts", CTLFLAG_RD, &ha->fp_array[i].rx_pkts, "No. of received packets"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tpa_start", CTLFLAG_RD, &ha->fp_array[i].tpa_start, "No. of tpa_start packets"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tpa_cont", CTLFLAG_RD, &ha->fp_array[i].tpa_cont, "No. of tpa_cont packets"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "tpa_end", CTLFLAG_RD, &ha->fp_array[i].tpa_end, "No. of tpa_end packets"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_m_getcl", CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, "err_m_getcl"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_m_getjcl", CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, "err_m_getjcl"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_rx_hw_errors", CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, "err_rx_hw_errors"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "err_rx_alloc_errors", CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, "err_rx_alloc_errors"); } return; } static void qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *ctx_oid; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat"); children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "no_buff_discards", CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, "No. of packets discarded due to lack of buffer"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "packet_too_big_discard", CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, "No. 
of packets discarded because packet was too big"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "ttl0_discard", CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, "ttl0_discard"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_ucast_bytes", CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, "rx_ucast_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_mcast_bytes", CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, "rx_mcast_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_bcast_bytes", CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, "rx_bcast_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_ucast_pkts", CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, "rx_ucast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_mcast_pkts", CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, "rx_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_bcast_pkts", CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, "rx_bcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "mftag_filter_discards", CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, "mftag_filter_discards"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "mac_filter_discards", CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, "mac_filter_discards"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_ucast_bytes", CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, "tx_ucast_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_mcast_bytes", CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, "tx_mcast_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_bcast_bytes", CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, "tx_bcast_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_ucast_pkts", CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, "tx_ucast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_mcast_pkts", CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, "tx_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_bcast_pkts", CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, "tx_bcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_err_drop_pkts", CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, "tx_err_drop_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tpa_coalesced_pkts", CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, "tpa_coalesced_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tpa_coalesced_events", CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, "tpa_coalesced_events"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tpa_aborts_num", CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, "tpa_aborts_num"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tpa_not_coalesced_pkts", CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, "tpa_not_coalesced_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tpa_coalesced_bytes", CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, "tpa_coalesced_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_64_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, "rx_64_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_65_to_127_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, "rx_65_to_127_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_128_to_255_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, "rx_128_to_255_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_256_to_511_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, "rx_256_to_511_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_512_to_1023_byte_packets", CTLFLAG_RD, 
&ha->hw_stats.common.rx_512_to_1023_byte_packets, "rx_512_to_1023_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_1024_to_1518_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, "rx_1024_to_1518_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_1519_to_1522_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, "rx_1519_to_1522_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_1523_to_2047_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, "rx_1523_to_2047_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_2048_to_4095_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, "rx_2048_to_4095_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_4096_to_9216_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, "rx_4096_to_9216_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_9217_to_16383_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, "rx_9217_to_16383_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_crc_errors", CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, "rx_crc_errors"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_mac_crtl_frames", CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, "rx_mac_crtl_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_pause_frames", CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, "rx_pause_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_pfc_frames", CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, "rx_pfc_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_align_errors", CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, "rx_align_errors"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_carrier_errors", CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, "rx_carrier_errors"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_oversize_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, "rx_oversize_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_jabbers", CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, "rx_jabbers"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_undersize_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, "rx_undersize_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_fragments", CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, "rx_fragments"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_64_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, "tx_64_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_65_to_127_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, "tx_65_to_127_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_128_to_255_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, "tx_128_to_255_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_256_to_511_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, "tx_256_to_511_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_512_to_1023_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, "tx_512_to_1023_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_1024_to_1518_byte_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, "tx_1024_to_1518_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_1519_to_2047_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, "tx_1519_to_2047_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 
"tx_2048_to_4095_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, "tx_2048_to_4095_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_4096_to_9216_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, "tx_4096_to_9216_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_9217_to_16383_byte_packets", CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, "tx_9217_to_16383_byte_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_pause_frames", CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, "tx_pause_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_pfc_frames", CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, "tx_pfc_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_lpi_entry_count", CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, "tx_lpi_entry_count"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_total_collisions", CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, "tx_total_collisions"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "brb_truncates", CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, "brb_truncates"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "brb_discards", CTLFLAG_RD, &ha->hw_stats.common.brb_discards, "brb_discards"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_mac_bytes", CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, "rx_mac_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_mac_uc_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, "rx_mac_uc_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_mac_mc_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, "rx_mac_mc_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_mac_bc_packets", CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, "rx_mac_bc_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_mac_frames_ok", CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, "rx_mac_frames_ok"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_mac_bytes", CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes, "tx_mac_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_mac_uc_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, "tx_mac_uc_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_mac_mc_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, "tx_mac_mc_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_mac_bc_packets", CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, "tx_mac_bc_packets"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_mac_ctrl_frames", CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, "tx_mac_ctrl_frames"); return; } static void qlnx_add_sysctls(qlnx_host_t *ha) { device_t dev = ha->pci_dev; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; ctx = device_get_sysctl_ctx(dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); qlnx_add_fp_stats_sysctls(ha); qlnx_add_sp_stats_sysctls(ha); if (qlnx_vf_device(ha) != 0) qlnx_add_hw_stats_sysctls(ha); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", CTLFLAG_RD, qlnx_ver_str, 0, "Driver Version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", CTLFLAG_RD, ha->stormfw_ver, 0, "STORM Firmware Version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", CTLFLAG_RD, ha->mfw_ver, 0, "Management Firmware Version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "personality", CTLFLAG_RD, &ha->personality, ha->personality, "\tpersonality = 0 => Ethernet Only\n" "\tpersonality = 3 => Ethernet and RoCE\n" "\tpersonality = 4 => Ethernet and iWARP\n" "\tpersonality = 6 => Default in Shared Memory\n"); 
ha->dbg_level = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug", CTLFLAG_RW, &ha->dbg_level, ha->dbg_level, "Debug Level"); ha->dp_level = 0x01; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dp_level", CTLFLAG_RW, &ha->dp_level, ha->dp_level, "DP Level"); ha->dbg_trace_lro_cnt = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, "Trace LRO Counts"); ha->dbg_trace_tso_pkt_len = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, "Trace TSO packet lengths"); ha->dp_module = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dp_module", CTLFLAG_RW, &ha->dp_module, ha->dp_module, "DP Module"); ha->err_inject = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "err_inject", CTLFLAG_RW, &ha->err_inject, ha->err_inject, "Error Inject"); ha->storm_stats_enable = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "storm_stats_enable", CTLFLAG_RW, &ha->storm_stats_enable, ha->storm_stats_enable, "Enable Storm Statistics Gathering"); ha->storm_stats_index = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "storm_stats_index", CTLFLAG_RD, &ha->storm_stats_index, ha->storm_stats_index, "Enable Storm Statistics Gathering Current Index"); ha->grcdump_taken = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_taken", CTLFLAG_RD, &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken"); ha->idle_chk_taken = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "idle_chk_taken", CTLFLAG_RD, &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, "rx_coalesce_usecs"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, "tx_coalesce_usecs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qlnx_set_rx_coalesce, "I", "rx interrupt coalesce period microseconds"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qlnx_set_tx_coalesce, "I", "tx interrupt coalesce period microseconds"); ha->rx_pkt_threshold = 128; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, &ha->rx_pkt_threshold, ha->rx_pkt_threshold, "No. 
of Rx Pkts to process at a time"); ha->rx_jumbo_buf_eq_mtu = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" "otherwise Rx Jumbo buffers are set to >= MTU size\n"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "err_illegal_intr", CTLFLAG_RD, &ha->err_illegal_intr, "err_illegal_intr"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "err_fp_null", CTLFLAG_RD, &ha->err_fp_null, "err_fp_null"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); return; } /***************************************************************************** * Operating System Network Interface Functions *****************************************************************************/ static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) { uint16_t device_id; if_t ifp; ifp = ha->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); device_id = pci_get_device(ha->pci_dev); if (device_id == QLOGIC_PCI_DEVICE_ID_1634) if_setbaudrate(ifp, IF_Gbps(40)); else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || (device_id == QLOGIC_PCI_DEVICE_ID_8070)) if_setbaudrate(ifp, IF_Gbps(25)); else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) if_setbaudrate(ifp, IF_Gbps(50)); else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) if_setbaudrate(ifp, IF_Gbps(100)); if_setcapabilities(ifp, IFCAP_LINKSTATE); if_setinitfn(ifp, qlnx_init); if_setsoftc(ifp, ha); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, qlnx_ioctl); if_settransmitfn(ifp, qlnx_transmit); if_setqflushfn(ifp, qlnx_qflush); if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha)); if_setsendqready(ifp); if_setgetcounterfn(ifp, qlnx_get_counter); ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); if (!ha->primary_mac[0] && !ha->primary_mac[1] && !ha->primary_mac[2] && !ha->primary_mac[3] && !ha->primary_mac[4] && !ha->primary_mac[5]) { uint32_t rnd; rnd = arc4random(); ha->primary_mac[0] = 0x00; ha->primary_mac[1] = 0x0e; ha->primary_mac[2] = 0x1e; ha->primary_mac[3] = rnd & 0xFF; ha->primary_mac[4] = (rnd >> 8) & 0xFF; ha->primary_mac[5] = (rnd >> 16) & 0xFF; } ether_ifattach(ifp, ha->primary_mac); bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); if_setcapabilities(ifp, IFCAP_HWCSUM); if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0); if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0); if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0); if_setcapabilitiesbit(ifp, IFCAP_LRO, 0); if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */ if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE); if_setcapenable(ifp, if_getcapabilities(ifp)); if_sethwassist(ifp, CSUM_IP); if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0); if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0); if_sethwassistbits(ifp, CSUM_TSO, 0); if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 
qlnx_media_status); if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); } ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); QL_DPRINT2(ha, "exit\n"); return; } static void qlnx_init_locked(qlnx_host_t *ha) { if_t ifp = ha->ifp; QL_DPRINT1(ha, "Driver Initialization start \n"); qlnx_stop(ha); if (qlnx_load(ha) == 0) { if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); #ifdef QLNX_ENABLE_IWARP if (qlnx_vf_device(ha) != 0) { qlnx_rdma_dev_open(ha); } #endif /* #ifdef QLNX_ENABLE_IWARP */ } return; } static void qlnx_init(void *arg) { qlnx_host_t *ha; ha = (qlnx_host_t *)arg; QL_DPRINT2(ha, "enter\n"); QLNX_LOCK(ha); qlnx_init_locked(ha); QLNX_UNLOCK(ha); QL_DPRINT2(ha, "exit\n"); return; } static int qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) { struct ecore_filter_mcast *mcast; struct ecore_dev *cdev; int rc; cdev = &ha->cdev; mcast = &ha->ecore_mcast; bzero(mcast, sizeof(struct ecore_filter_mcast)); if (add_mac) mcast->opcode = ECORE_FILTER_ADD; else mcast->opcode = ECORE_FILTER_REMOVE; mcast->num_mc_addrs = 1; memcpy(mcast->mac, mac_addr, ETH_ALEN); rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); return (rc); } static int qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) { int i; for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) return 0; /* its been already added */ } for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { if ((ha->mcast[i].addr[0] == 0) && (ha->mcast[i].addr[1] == 0) && (ha->mcast[i].addr[2] == 0) && (ha->mcast[i].addr[3] == 0) && (ha->mcast[i].addr[4] == 0) && (ha->mcast[i].addr[5] == 0)) { if (qlnx_config_mcast_mac_addr(ha, mta, 1)) return (-1); bcopy(mta, ha->mcast[i].addr, ETH_ALEN); ha->nmcast++; return 0; } } return 0; } static int qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) { int i; for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { if (qlnx_config_mcast_mac_addr(ha, mta, 0)) return (-1); ha->mcast[i].addr[0] = 0; ha->mcast[i].addr[1] = 0; ha->mcast[i].addr[2] = 0; ha->mcast[i].addr[3] = 0; ha->mcast[i].addr[4] = 0; ha->mcast[i].addr[5] = 0; ha->nmcast--; return 0; } } return 0; } /* * Name: qls_hw_set_multi * Function: Sets the Multicast Addresses provided the host O.S into the * hardware (for the given interface) */ static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, uint32_t add_mac) { int i; for (i = 0; i < mcnt; i++) { if (add_mac) { if (qlnx_hw_add_mcast(ha, mta)) break; } else { if (qlnx_hw_del_mcast(ha, mta)) 
break; } mta += ETHER_HDR_LEN; } return; } static u_int qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) { uint8_t *mta = arg; if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) return (0); bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); return (1); } static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) { uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN]; if_t ifp = ha->ifp; u_int mcnt; if (qlnx_vf_device(ha) == 0) return (0); mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta); QLNX_LOCK(ha); qlnx_hw_set_multi(ha, mta, mcnt, add_multi); QLNX_UNLOCK(ha); return (0); } static int qlnx_set_promisc(qlnx_host_t *ha, int enabled) { int rc = 0; uint8_t filter; if (qlnx_vf_device(ha) == 0) return (0); filter = ha->filter; if (enabled) { filter |= ECORE_ACCEPT_MCAST_UNMATCHED; filter |= ECORE_ACCEPT_UCAST_UNMATCHED; } else { filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED; filter &= ~ECORE_ACCEPT_UCAST_UNMATCHED; } rc = qlnx_set_rx_accept_filter(ha, filter); return (rc); } static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled) { int rc = 0; uint8_t filter; if (qlnx_vf_device(ha) == 0) return (0); filter = ha->filter; if (enabled) { filter |= ECORE_ACCEPT_MCAST_UNMATCHED; } else { filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED; } rc = qlnx_set_rx_accept_filter(ha, filter); return (rc); } static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data) { int ret = 0, mask; struct ifreq *ifr = (struct ifreq *)data; #ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; #endif qlnx_host_t *ha; ha = (qlnx_host_t *)if_getsoftc(ifp); switch (cmd) { case SIOCSIFADDR: QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) { if_setflagbits(ifp, IFF_UP, 0); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { QLNX_LOCK(ha); qlnx_init_locked(ha); QLNX_UNLOCK(ha); } QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); arp_ifinit(ifp, ifa); break; } #endif ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); if (ifr->ifr_mtu > QLNX_MAX_MTU) { ret = EINVAL; } else { QLNX_LOCK(ha); if_setmtu(ifp, ifr->ifr_mtu); ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { qlnx_init_locked(ha); } QLNX_UNLOCK(ha); } break; case SIOCSIFFLAGS: QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); QLNX_LOCK(ha); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ ha->if_flags) & IFF_PROMISC) { ret = qlnx_set_promisc(ha, ifp->if_flags & IFF_PROMISC); } else if ((if_getflags(ifp) ^ ha->if_flags) & IFF_ALLMULTI) { ret = qlnx_set_allmulti(ha, ifp->if_flags & IFF_ALLMULTI); } } else { ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; qlnx_init_locked(ha); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) qlnx_stop(ha); } ha->if_flags = if_getflags(ifp); QLNX_UNLOCK(ha); break; case SIOCADDMULTI: QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if (qlnx_set_multi(ha, 1)) ret = EINVAL; } break; case SIOCDELMULTI: QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if (qlnx_set_multi(ha, 0)) ret = EINVAL; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", 
cmd); if (mask & IFCAP_HWCSUM) if_togglecapenable(ifp, IFCAP_HWCSUM); if (mask & IFCAP_TSO4) if_togglecapenable(ifp, IFCAP_TSO4); if (mask & IFCAP_TSO6) if_togglecapenable(ifp, IFCAP_TSO6); if (mask & IFCAP_VLAN_HWTAGGING) if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if (mask & IFCAP_VLAN_HWTSO) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if (mask & IFCAP_LRO) if_togglecapenable(ifp, IFCAP_LRO); QLNX_LOCK(ha); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) qlnx_init_locked(ha); QLNX_UNLOCK(ha); VLAN_CAPABILITIES(ifp); break; case SIOCGI2C: { struct ifi2creq i2c; struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; struct ecore_ptt *p_ptt; ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (ret) break; if ((i2c.len > sizeof (i2c.data)) || (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { ret = EINVAL; break; } p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) { QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); ret = -1; break; } ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, i2c.len, &i2c.data[0]); ecore_ptt_release(p_hwfn, p_ptt); if (ret) { ret = -1; break; } ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ len = %d addr = 0x%02x offset = 0x%04x \ data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 0x%02x 0x%02x 0x%02x\n", ret, i2c.len, i2c.dev_addr, i2c.offset, i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); break; } default: QL_DPRINT4(ha, "default (0x%lx)\n", cmd); ret = ether_ioctl(ifp, cmd, data); break; } return (ret); } static int qlnx_media_change(if_t ifp) { qlnx_host_t *ha; struct ifmedia *ifm; int ret = 0; ha = (qlnx_host_t *)if_getsoftc(ifp); QL_DPRINT2(ha, "enter\n"); ifm = &ha->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) ret = EINVAL; QL_DPRINT2(ha, "exit\n"); return (ret); } static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr) { qlnx_host_t *ha; ha = (qlnx_host_t *)if_getsoftc(ifp); QL_DPRINT2(ha, "enter\n"); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (ha->link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); if (ha->if_link.link_partner_caps & (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) ifmr->ifm_active |= (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); } QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); return; } static void qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_tx_queue *txq) { u16 idx; struct mbuf *mp; bus_dmamap_t map; int i; // struct eth_tx_bd *tx_data_bd; struct eth_tx_1st_bd *first_bd; int nbds = 0; idx = txq->sw_tx_cons; mp = txq->sw_tx_ring[idx].mp; map = txq->sw_tx_ring[idx].map; if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); QL_DPRINT1(ha, "(mp == NULL) " " tx_idx = 0x%x" " ecore_prod_idx = 0x%x" " ecore_cons_idx = 0x%x" " hw_bd_cons = 0x%x" " txq_db_last = 0x%x" " elem_left = 0x%x\n", fp->rss_id, ecore_chain_get_prod_idx(&txq->tx_pbl), ecore_chain_get_cons_idx(&txq->tx_pbl), le16toh(*txq->hw_cons_ptr), txq->tx_db.raw, ecore_chain_get_elem_left(&txq->tx_pbl)); fp->err_tx_free_pkt_null++; //DEBUG qlnx_trigger_dump(ha); return; } else { QLNX_INC_OPACKETS((ha->ifp)); QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, map); fp->tx_pkts_freed++; fp->tx_pkts_completed++; m_freem(mp); } first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); nbds = first_bd->data.nbds; // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); for (i = 1; i < nbds; i++) { /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl); // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); } txq->sw_tx_ring[idx].flags = 0; txq->sw_tx_ring[idx].mp = NULL; txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; return; } static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_tx_queue *txq) { u16 hw_bd_cons; u16 ecore_cons_idx; uint16_t diff; uint16_t idx, idx2; hw_bd_cons = le16toh(*txq->hw_cons_ptr); while (hw_bd_cons != (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { diff = hw_bd_cons - ecore_cons_idx; if ((diff > TX_RING_SIZE) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); QL_DPRINT1(ha, "(diff = 0x%x) " " tx_idx = 0x%x" " ecore_prod_idx = 0x%x" " ecore_cons_idx = 0x%x" " hw_bd_cons = 0x%x" " txq_db_last = 0x%x" " elem_left = 0x%x\n", diff, fp->rss_id, ecore_chain_get_prod_idx(&txq->tx_pbl), ecore_chain_get_cons_idx(&txq->tx_pbl), le16toh(*txq->hw_cons_ptr), txq->tx_db.raw, ecore_chain_get_elem_left(&txq->tx_pbl)); fp->err_tx_cons_idx_conflict++; //DEBUG qlnx_trigger_dump(ha); } idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); prefetch(txq->sw_tx_ring[idx].mp); prefetch(txq->sw_tx_ring[idx2].mp); qlnx_free_tx_pkt(ha, fp, txq); txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); } return; } static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp) { int ret = 0; struct qlnx_tx_queue *txq; qlnx_host_t * ha; uint16_t elem_left; txq = fp->txq[0]; ha = (qlnx_host_t *)fp->edev; if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) { if(mp != NULL) ret = drbr_enqueue(ifp, fp->tx_br, mp); return (ret); } if(mp != NULL) ret = drbr_enqueue(ifp, fp->tx_br, mp); mp = drbr_peek(ifp, fp->tx_br); while (mp != NULL) { if (qlnx_send(ha, fp, &mp)) { if (mp != NULL) { drbr_putback(ifp, fp->tx_br, mp); } else { fp->tx_pkts_processed++; drbr_advance(ifp, fp->tx_br); } goto qlnx_transmit_locked_exit; } else { drbr_advance(ifp, fp->tx_br); fp->tx_pkts_transmitted++; fp->tx_pkts_processed++; } mp = drbr_peek(ifp, fp->tx_br); } qlnx_transmit_locked_exit: if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || ((int)(elem_left = 
ecore_chain_get_elem_left(&txq->tx_pbl)) < QLNX_TX_ELEM_MAX_THRESH)) (void)qlnx_tx_int(ha, fp, fp->txq[0]); QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); return ret; } static int qlnx_transmit(if_t ifp, struct mbuf *mp) { qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp); struct qlnx_fastpath *fp; int rss_id = 0, ret = 0; #ifdef QLNX_TRACEPERF_DATA uint64_t tx_pkts = 0, tx_compl = 0; #endif QL_DPRINT2(ha, "enter\n"); if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % ha->num_rss; fp = &ha->fp_array[rss_id]; if (fp->tx_br == NULL) { ret = EINVAL; goto qlnx_transmit_exit; } if (mtx_trylock(&fp->tx_mtx)) { #ifdef QLNX_TRACEPERF_DATA tx_pkts = fp->tx_pkts_transmitted; tx_compl = fp->tx_pkts_completed; #endif ret = qlnx_transmit_locked(ifp, fp, mp); #ifdef QLNX_TRACEPERF_DATA fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); #endif mtx_unlock(&fp->tx_mtx); } else { if (mp != NULL && (fp->fp_taskqueue != NULL)) { ret = drbr_enqueue(ifp, fp->tx_br, mp); taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); } } qlnx_transmit_exit: QL_DPRINT2(ha, "exit ret = %d\n", ret); return ret; } static void qlnx_qflush(if_t ifp) { int rss_id; struct qlnx_fastpath *fp; struct mbuf *mp; qlnx_host_t *ha; ha = (qlnx_host_t *)if_getsoftc(ifp); QL_DPRINT2(ha, "enter\n"); for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { fp = &ha->fp_array[rss_id]; if (fp == NULL) continue; if (fp->tx_br) { mtx_lock(&fp->tx_mtx); while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { fp->tx_pkts_freed++; m_freem(mp); } mtx_unlock(&fp->tx_mtx); } } QL_DPRINT2(ha, "exit\n"); return; } static void qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) { uint32_t offset; offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); bus_write_4(ha->pci_dbells, offset, value); bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); return; } static uint32_t qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) { struct ether_vlan_header *eh = NULL; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; struct tcphdr *th = NULL; uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; uint16_t etype = 0; uint8_t buf[sizeof(struct ip6_hdr)]; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = sizeof (struct ip); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof(struct ip), buf); ip = (struct ip *)buf; } th = (struct tcphdr *)(ip + 1); offset = ip_hlen + ehdrlen + (th->th_off << 2); break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), buf); ip6 = (struct ip6_hdr *)buf; } th = (struct tcphdr *)(ip6 + 1); offset = ip_hlen + ehdrlen + (th->th_off << 2); break; default: break; } return (offset); } static __inline int qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, uint32_t offset) { int i; uint32_t sum, nbds_in_hdr = 1; uint32_t window; bus_dma_segment_t *s_seg; /* If the header spans multiple segments, skip those segments */ if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) return (0); i = 0; while 
((i < nsegs) && (offset >= segs->ds_len)) { offset = offset - segs->ds_len; segs++; i++; nbds_in_hdr++; } window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; nsegs = nsegs - i; while (nsegs >= window) { sum = 0; s_seg = segs; for (i = 0; i < window; i++){ sum += s_seg->ds_len; s_seg++; } if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { fp->tx_lso_wnd_min_len++; return (-1); } nsegs = nsegs - 1; segs++; } return (0); } static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) { bus_dma_segment_t *segs; bus_dmamap_t map = 0; uint32_t nsegs = 0; int ret = -1; struct mbuf *m_head = *m_headp; uint16_t idx = 0; uint16_t elem_left; uint8_t nbd = 0; struct qlnx_tx_queue *txq; struct eth_tx_1st_bd *first_bd; struct eth_tx_2nd_bd *second_bd; struct eth_tx_3rd_bd *third_bd; struct eth_tx_bd *tx_data_bd; int seg_idx = 0; uint32_t nbds_in_hdr = 0; uint32_t offset = 0; #ifdef QLNX_TRACE_PERF_DATA uint16_t bd_used; #endif QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); if (!ha->link_up) return (-1); first_bd = NULL; second_bd = NULL; third_bd = NULL; tx_data_bd = NULL; txq = fp->txq[0]; if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < QLNX_TX_ELEM_MIN_THRESH) { fp->tx_nsegs_gt_elem_left++; fp->err_tx_nsegs_gt_elem_left++; return (ENOBUFS); } idx = txq->sw_tx_prod; map = txq->sw_tx_ring[idx].map; segs = txq->segs; ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (ha->dbg_trace_tso_pkt_len) { if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { if (!fp->tx_tso_min_pkt_len) { fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; } else { if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len; } } } if (m_head->m_pkthdr.csum_flags & CSUM_TSO) offset = qlnx_tcp_offset(ha, m_head); if ((ret == EFBIG) || ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && qlnx_tso_check(fp, segs, nsegs, offset))))) { struct mbuf *m; QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); fp->tx_defrag++; m = m_defrag(m_head, M_NOWAIT); if (m == NULL) { fp->err_tx_defrag++; fp->tx_pkts_freed++; m_freem(m_head); *m_headp = NULL; QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); return (ENOBUFS); } m_head = m; *m_headp = m_head; if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT))) { fp->err_tx_defrag_dmamap_load++; QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", ret, m_head->m_pkthdr.len); fp->tx_pkts_freed++; m_freem(m_head); *m_headp = NULL; return (ret); } if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { fp->err_tx_non_tso_max_seg++; QL_DPRINT1(ha, "(%d) nsegs too many for non-TSO [%d, %d]\n", ret, nsegs, m_head->m_pkthdr.len); fp->tx_pkts_freed++; m_freem(m_head); *m_headp = NULL; return (ret); } if (m_head->m_pkthdr.csum_flags & CSUM_TSO) offset = qlnx_tcp_offset(ha, m_head); } else if (ret) { fp->err_tx_dmamap_load++; QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", ret, m_head->m_pkthdr.len); fp->tx_pkts_freed++; m_freem(m_head); *m_headp = NULL; return (ret); } QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); if (ha->dbg_trace_tso_pkt_len) { if (nsegs < QLNX_FP_MAX_SEGS) fp->tx_pkts[(nsegs - 1)]++; else fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; } #ifdef QLNX_TRACE_PERF_DATA if 
(m_head->m_pkthdr.csum_flags & CSUM_TSO) { if(m_head->m_pkthdr.len <= 2048) fp->tx_pkts_hist[0]++; else if((m_head->m_pkthdr.len > 2048) && (m_head->m_pkthdr.len <= 4096)) fp->tx_pkts_hist[1]++; else if((m_head->m_pkthdr.len > 4096) && (m_head->m_pkthdr.len <= 8192)) fp->tx_pkts_hist[2]++; else if((m_head->m_pkthdr.len > 8192) && (m_head->m_pkthdr.len <= 12288 )) fp->tx_pkts_hist[3]++; else if((m_head->m_pkthdr.len > 11288) && (m_head->m_pkthdr.len <= 16394)) fp->tx_pkts_hist[4]++; else if((m_head->m_pkthdr.len > 16384) && (m_head->m_pkthdr.len <= 20480)) fp->tx_pkts_hist[5]++; else if((m_head->m_pkthdr.len > 20480) && (m_head->m_pkthdr.len <= 24576)) fp->tx_pkts_hist[6]++; else if((m_head->m_pkthdr.len > 24576) && (m_head->m_pkthdr.len <= 28672)) fp->tx_pkts_hist[7]++; else if((m_head->m_pkthdr.len > 28762) && (m_head->m_pkthdr.len <= 32768)) fp->tx_pkts_hist[8]++; else if((m_head->m_pkthdr.len > 32768) && (m_head->m_pkthdr.len <= 36864)) fp->tx_pkts_hist[9]++; else if((m_head->m_pkthdr.len > 36864) && (m_head->m_pkthdr.len <= 40960)) fp->tx_pkts_hist[10]++; else if((m_head->m_pkthdr.len > 40960) && (m_head->m_pkthdr.len <= 45056)) fp->tx_pkts_hist[11]++; else if((m_head->m_pkthdr.len > 45056) && (m_head->m_pkthdr.len <= 49152)) fp->tx_pkts_hist[12]++; else if((m_head->m_pkthdr.len > 49512) && m_head->m_pkthdr.len <= 53248)) fp->tx_pkts_hist[13]++; else if((m_head->m_pkthdr.len > 53248) && (m_head->m_pkthdr.len <= 57344)) fp->tx_pkts_hist[14]++; else if((m_head->m_pkthdr.len > 53248) && (m_head->m_pkthdr.len <= 57344)) fp->tx_pkts_hist[15]++; else if((m_head->m_pkthdr.len > 57344) && (m_head->m_pkthdr.len <= 61440)) fp->tx_pkts_hist[16]++; else fp->tx_pkts_hist[17]++; } if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); bd_used = TX_RING_SIZE - elem_left; if(bd_used <= 100) fp->tx_pkts_q[0]++; else if((bd_used > 100) && (bd_used <= 500)) fp->tx_pkts_q[1]++; else if((bd_used > 500) && (bd_used <= 1000)) fp->tx_pkts_q[2]++; else if((bd_used > 1000) && (bd_used <= 2000)) fp->tx_pkts_q[3]++; else if((bd_used > 3000) && (bd_used <= 4000)) fp->tx_pkts_q[4]++; else if((bd_used > 4000) && (bd_used <= 5000)) fp->tx_pkts_q[5]++; else if((bd_used > 6000) && (bd_used <= 7000)) fp->tx_pkts_q[6]++; else if((bd_used > 7000) && (bd_used <= 8000)) fp->tx_pkts_q[7]++; else if((bd_used > 8000) && (bd_used <= 9000)) fp->tx_pkts_q[8]++; else if((bd_used > 9000) && (bd_used <= 10000)) fp->tx_pkts_q[9]++; else if((bd_used > 10000) && (bd_used <= 11000)) fp->tx_pkts_q[10]++; else if((bd_used > 11000) && (bd_used <= 12000)) fp->tx_pkts_q[11]++; else if((bd_used > 12000) && (bd_used <= 13000)) fp->tx_pkts_q[12]++; else if((bd_used > 13000) && (bd_used <= 14000)) fp->tx_pkts_q[13]++; else if((bd_used > 14000) && (bd_used <= 15000)) fp->tx_pkts_q[14]++; else if((bd_used > 15000) && (bd_used <= 16000)) fp->tx_pkts_q[15]++; else fp->tx_pkts_q[16]++; } #endif /* end of QLNX_TRACE_PERF_DATA */ if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs" " in chain[%d] trying to free packets\n", nsegs, elem_left, fp->rss_id); fp->tx_nsegs_gt_elem_left++; (void)qlnx_tx_int(ha, fp, txq); if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs in chain[%d]\n", nsegs, elem_left, fp->rss_id); fp->err_tx_nsegs_gt_elem_left++; fp->tx_ring_full = 1; if (ha->storm_stats_enable) ha->storm_stats_gather = 1; return 
(ENOBUFS); } } bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); txq->sw_tx_ring[idx].mp = m_head; first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); memset(first_bd, 0, sizeof(*first_bd)); first_bd->data.bd_flags.bitfields = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); nbd++; if (m_head->m_pkthdr.csum_flags & CSUM_IP) { first_bd->data.bd_flags.bitfields |= (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); } if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { first_bd->data.bd_flags.bitfields |= (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); } if (m_head->m_flags & M_VLANTAG) { first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; first_bd->data.bd_flags.bitfields |= (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); } if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { first_bd->data.bd_flags.bitfields |= (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); first_bd->data.bd_flags.bitfields |= (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); nbds_in_hdr = 1; if (offset == segs->ds_len) { BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); segs++; seg_idx++; second_bd = (struct eth_tx_2nd_bd *) ecore_chain_produce(&txq->tx_pbl); memset(second_bd, 0, sizeof(*second_bd)); nbd++; if (seg_idx < nsegs) { BD_SET_UNMAP_ADDR_LEN(second_bd, \ (segs->ds_addr), (segs->ds_len)); segs++; seg_idx++; } third_bd = (struct eth_tx_3rd_bd *) ecore_chain_produce(&txq->tx_pbl); memset(third_bd, 0, sizeof(*third_bd)); third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; third_bd->data.bitfields |= (nbds_in_hdr << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); if (seg_idx < nsegs) { BD_SET_UNMAP_ADDR_LEN(third_bd, (segs->ds_addr), (segs->ds_len)); segs++; seg_idx++; } for (; seg_idx < nsegs; seg_idx++) { tx_data_bd = (struct eth_tx_bd *) ecore_chain_produce(&txq->tx_pbl); memset(tx_data_bd, 0, sizeof(*tx_data_bd)); BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ segs->ds_addr,\ segs->ds_len); segs++; nbd++; } } else if (offset < segs->ds_len) { BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); second_bd = (struct eth_tx_2nd_bd *) ecore_chain_produce(&txq->tx_pbl); memset(second_bd, 0, sizeof(*second_bd)); BD_SET_UNMAP_ADDR_LEN(second_bd, \ (segs->ds_addr + offset),\ (segs->ds_len - offset)); nbd++; segs++; third_bd = (struct eth_tx_3rd_bd *) ecore_chain_produce(&txq->tx_pbl); memset(third_bd, 0, sizeof(*third_bd)); BD_SET_UNMAP_ADDR_LEN(third_bd, \ segs->ds_addr,\ segs->ds_len); third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; third_bd->data.bitfields |= (nbds_in_hdr << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); segs++; nbd++; for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { tx_data_bd = (struct eth_tx_bd *) ecore_chain_produce(&txq->tx_pbl); memset(tx_data_bd, 0, sizeof(*tx_data_bd)); BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ segs->ds_addr,\ segs->ds_len); segs++; nbd++; } } else { offset = offset - segs->ds_len; segs++; for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { if (offset) nbds_in_hdr++; tx_data_bd = (struct eth_tx_bd *) ecore_chain_produce(&txq->tx_pbl); memset(tx_data_bd, 0, sizeof(*tx_data_bd)); if (second_bd == NULL) { second_bd = (struct eth_tx_2nd_bd *) tx_data_bd; } else if (third_bd == NULL) { third_bd = (struct eth_tx_3rd_bd *) tx_data_bd; } if (offset && (offset < segs->ds_len)) { BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ segs->ds_addr, offset); tx_data_bd = (struct eth_tx_bd *) ecore_chain_produce(&txq->tx_pbl); memset(tx_data_bd, 0, sizeof(*tx_data_bd)); if (second_bd == NULL) { second_bd = (struct eth_tx_2nd_bd *)tx_data_bd; } else if (third_bd == NULL) { third_bd = (struct eth_tx_3rd_bd *)tx_data_bd; } BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ (segs->ds_addr + offset), \ (segs->ds_len - offset)); nbd++; offset = 0; } else { if (offset) offset = offset - segs->ds_len; BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\
segs->ds_addr, segs->ds_len); } segs++; nbd++; } if (third_bd == NULL) { third_bd = (struct eth_tx_3rd_bd *) ecore_chain_produce(&txq->tx_pbl); memset(third_bd, 0, sizeof(*third_bd)); } third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; third_bd->data.bitfields |= (nbds_in_hdr << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); } fp->tx_tso_pkts++; } else { segs++; for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { tx_data_bd = (struct eth_tx_bd *) ecore_chain_produce(&txq->tx_pbl); memset(tx_data_bd, 0, sizeof(*tx_data_bd)); BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ segs->ds_len); segs++; nbd++; } first_bd->data.bitfields = (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; first_bd->data.bitfields = htole16(first_bd->data.bitfields); fp->tx_non_tso_pkts++; } first_bd->data.nbds = nbd; if (ha->dbg_trace_tso_pkt_len) { if (fp->tx_tso_max_nsegs < nsegs) fp->tx_tso_max_nsegs = nsegs; if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) fp->tx_tso_min_nsegs = nsegs; } txq->sw_tx_ring[idx].nsegs = nsegs; txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); txq->tx_db.data.bd_prod = htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); return (0); } static void qlnx_stop(qlnx_host_t *ha) { if_t ifp = ha->ifp; int i; if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); /* * We simply lock and unlock each fp->tx_mtx to * propagate the if_drv_flags * state to each tx thread */ QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); if (ha->state == QLNX_STATE_OPEN) { for (i = 0; i < ha->num_rss; i++) { struct qlnx_fastpath *fp = &ha->fp_array[i]; mtx_lock(&fp->tx_mtx); mtx_unlock(&fp->tx_mtx); if (fp->fp_taskqueue != NULL) taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); } } #ifdef QLNX_ENABLE_IWARP if (qlnx_vf_device(ha) != 0) { qlnx_rdma_dev_close(ha); } #endif /* #ifdef QLNX_ENABLE_IWARP */ qlnx_unload(ha); return; } static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) { return(TX_RING_SIZE - 1); } uint8_t * qlnx_get_mac_addr(qlnx_host_t *ha) { struct ecore_hwfn *p_hwfn; unsigned char mac[ETHER_ADDR_LEN]; uint8_t p_is_forced; p_hwfn = &ha->cdev.hwfns[0]; if (qlnx_vf_device(ha) != 0) return (p_hwfn->hw_info.hw_mac_addr); ecore_vf_read_bulletin(p_hwfn, &p_is_forced); if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == true) { device_printf(ha->pci_dev, "%s: p_is_forced = %d" " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); memcpy(ha->primary_mac, mac, ETH_ALEN); } return (ha->primary_mac); } static uint32_t qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) { uint32_t ifm_type = 0; switch (if_link->media_type) { case MEDIA_MODULE_FIBER: case MEDIA_UNSPECIFIED: if (if_link->speed == (100 * 1000)) ifm_type = QLNX_IFM_100G_SR4; else if (if_link->speed == (40 * 1000)) ifm_type = IFM_40G_SR4; else if (if_link->speed == (25 * 1000)) ifm_type = QLNX_IFM_25G_SR; else if (if_link->speed == (10 * 1000)) ifm_type = (IFM_10G_LR | IFM_10G_SR); else if (if_link->speed == (1 * 1000)) ifm_type = (IFM_1000_SX | IFM_1000_LX); break; case MEDIA_DA_TWINAX: if (if_link->speed == (100 * 1000)) ifm_type = QLNX_IFM_100G_CR4; else if (if_link->speed == (40 * 1000)) ifm_type = IFM_40G_CR4; else if (if_link->speed == (25 * 1000)) ifm_type = QLNX_IFM_25G_CR; else if (if_link->speed == (10 * 1000)) ifm_type = IFM_10G_TWINAX; break; default : ifm_type = IFM_UNKNOWN; break; } return (ifm_type); }
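/*
 * Note on the transmit path above (summary comment only, no new driver
 * logic): qlnx_send() builds one eth_tx_1st_bd plus optional 2nd/3rd and
 * data BDs per mbuf chain, counts them in 'nbd', and records the dma map
 * and mbuf in sw_tx_ring[idx].  The software producer index wraps with a
 * mask, which assumes TX_RING_SIZE is a power of two:
 *
 *     txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
 *     txq->tx_db.data.bd_prod =
 *         htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
 *     qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
 *
 * The doorbell write publishes the new BD producer to the firmware;
 * qlnx_txq_doorbell_wr32() follows it with bus_barrier() calls on the
 * register and doorbell BARs.
 */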
/***************************************************************************** * Interrupt Service Functions *****************************************************************************/ static int qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf *mp_head, uint16_t len) { struct mbuf *mp, *mpf, *mpl; struct sw_rx_data *sw_rx_data; struct qlnx_rx_queue *rxq; uint16_t len_in_buffer; rxq = fp->rxq; mpf = mpl = mp = NULL; while (len) { rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; mp = sw_rx_data->data; if (mp == NULL) { QL_DPRINT1(ha, "mp = NULL\n"); fp->err_rx_mp_null++; rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); if (mpf != NULL) m_freem(mpf); return (-1); } bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { QL_DPRINT1(ha, "New buffer allocation failed, dropping" " incoming packet and reusing its buffer\n"); qlnx_reuse_rx_data(rxq); fp->err_rx_alloc_errors++; if (mpf != NULL) m_freem(mpf); return (-1); } ecore_chain_consume(&rxq->rx_bd_ring); if (len > rxq->rx_buf_size) len_in_buffer = rxq->rx_buf_size; else len_in_buffer = len; len = len - len_in_buffer; mp->m_flags &= ~M_PKTHDR; mp->m_next = NULL; mp->m_len = len_in_buffer; if (mpf == NULL) mpf = mpl = mp; else { mpl->m_next = mp; mpl = mp; } } if (mpf != NULL) mp_head->m_next = mpf; return (0); } static void qlnx_tpa_start(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_rx_queue *rxq, struct eth_fast_path_rx_tpa_start_cqe *cqe) { uint32_t agg_index; if_t ifp = ha->ifp; struct mbuf *mp; struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; struct sw_rx_data *sw_rx_data; dma_addr_t addr; bus_dmamap_t map; struct eth_rx_bd *rx_bd; int i; uint8_t hash_type; agg_index = cqe->tpa_agg_index; QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ \t type = 0x%x\n \ \t bitfields = 0x%x\n \ \t seg_len = 0x%x\n \ \t pars_flags = 0x%x\n \ \t vlan_tag = 0x%x\n \ \t rss_hash = 0x%x\n \ \t len_on_first_bd = 0x%x\n \ \t placement_offset = 0x%x\n \ \t tpa_agg_index = 0x%x\n \ \t header_len = 0x%x\n \ \t ext_bd_len_list[0] = 0x%x\n \ \t ext_bd_len_list[1] = 0x%x\n \ \t ext_bd_len_list[2] = 0x%x\n \ \t ext_bd_len_list[3] = 0x%x\n \ \t ext_bd_len_list[4] = 0x%x\n", fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, cqe->pars_flags.flags, cqe->vlan_tag, cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, cqe->tpa_agg_index, cqe->header_len, cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], cqe->ext_bd_len_list[4]); if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { fp->err_rx_tpa_invalid_agg_num++; return; } sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); mp = sw_rx_data->data; QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); if (mp == NULL) { QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); fp->err_rx_mp_null++; rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); return; } if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," " flags = %x, dropping incoming packet\n", fp->rss_id, rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); fp->err_rx_hw_errors++; qlnx_reuse_rx_data(rxq); QLNX_INC_IERRORS(ifp); return; } if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { QL_DPRINT7(ha, "[%d]: New buffer allocation failed," " dropping incoming packet and reusing its buffer\n", fp->rss_id); fp->err_rx_alloc_errors++; 
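		/*
		 * Allocation-failure recovery (describes the code that
		 * follows): the aggregation cannot be started without a
		 * replacement buffer, so the mbuf that was pre-posted for
		 * this TPA slot is swapped back into the rx ring in place
		 * of the consumed one.  The ring therefore never shrinks,
		 * and the slot is marked QLNX_AGG_STATE_ERROR so the
		 * matching TPA_END CQE is discarded instead of being
		 * passed up the stack.
		 */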
QLNX_INC_IQDROPS(ifp); /* * Load the tpa mbuf into the rx ring and save the * posted mbuf */ map = sw_rx_data->map; addr = sw_rx_data->dma_addr; sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; rxq->tpa_info[agg_index].rx_buf.data = mp; rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; rxq->tpa_info[agg_index].rx_buf.map = map; rx_bd = (struct eth_rx_bd *) ecore_chain_produce(&rxq->rx_bd_ring); rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_PREREAD); rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); ecore_chain_consume(&rxq->rx_bd_ring); /* Now reuse any buffers posted in ext_bd_len_list */ for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { if (cqe->ext_bd_len_list[i] == 0) break; qlnx_reuse_rx_data(rxq); } rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; return; } if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { QL_DPRINT7(ha, "[%d]: invalid aggregation state," " dropping incoming packet and reusing its buffer\n", fp->rss_id); QLNX_INC_IQDROPS(ifp); /* if we already have mbuf head in aggregation free it */ if (rxq->tpa_info[agg_index].mpf) { m_freem(rxq->tpa_info[agg_index].mpf); rxq->tpa_info[agg_index].mpl = NULL; } rxq->tpa_info[agg_index].mpf = mp; rxq->tpa_info[agg_index].mpl = NULL; rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); ecore_chain_consume(&rxq->rx_bd_ring); /* Now reuse any buffers posted in ext_bd_len_list */ for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { if (cqe->ext_bd_len_list[i] == 0) break; qlnx_reuse_rx_data(rxq); } rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; return; } /* * first process the ext_bd_len_list * if this fails then we simply drop the packet */ ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); if (cqe->ext_bd_len_list[i] == 0) break; sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); mpc = sw_rx_data->data; if (mpc == NULL) { QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); fp->err_rx_mp_null++; if (mpf != NULL) m_freem(mpf); mpf = mpl = NULL; rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); continue; } if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { QL_DPRINT7(ha, "[%d]: New buffer allocation failed," " dropping incoming packet and reusing its" " buffer\n", fp->rss_id); qlnx_reuse_rx_data(rxq); if (mpf != NULL) m_freem(mpf); mpf = mpl = NULL; rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); continue; } mpc->m_flags &= ~M_PKTHDR; mpc->m_next = NULL; mpc->m_len = cqe->ext_bd_len_list[i]; if (mpf == NULL) { mpf = mpl = mpc; } else { mpl->m_len = ha->rx_buf_size; mpl->m_next = mpc; mpl = mpc; } ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); } if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { QL_DPRINT7(ha, "[%d]: invalid aggregation 
state, dropping" " incoming packet and reusing its buffer\n", fp->rss_id); QLNX_INC_IQDROPS(ifp); rxq->tpa_info[agg_index].mpf = mp; rxq->tpa_info[agg_index].mpl = NULL; return; } rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; if (mpf != NULL) { mp->m_len = ha->rx_buf_size; mp->m_next = mpf; rxq->tpa_info[agg_index].mpf = mp; rxq->tpa_info[agg_index].mpl = mpl; } else { mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; rxq->tpa_info[agg_index].mpf = mp; rxq->tpa_info[agg_index].mpl = mp; mp->m_next = NULL; } mp->m_flags |= M_PKTHDR; /* assign packet to this interface interface */ mp->m_pkthdr.rcvif = ifp; /* assume no hardware checksum has complated */ mp->m_pkthdr.csum_flags = 0; //mp->m_pkthdr.flowid = fp->rss_id; mp->m_pkthdr.flowid = cqe->rss_hash; hash_type = cqe->bitfields & (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); switch (hash_type) { case RSS_HASH_TYPE_IPV4: M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); break; case RSS_HASH_TYPE_TCP_IPV4: M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); break; case RSS_HASH_TYPE_IPV6: M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); break; case RSS_HASH_TYPE_TCP_IPV6: M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); break; default: M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); break; } mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data = 0xFFFF; if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); mp->m_flags |= M_VLANTAG; } rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", fp->rss_id, rxq->tpa_info[agg_index].agg_state, rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); return; } static void qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_rx_queue *rxq, struct eth_fast_path_rx_tpa_cont_cqe *cqe) { struct sw_rx_data *sw_rx_data; int i; struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; struct mbuf *mp; uint32_t agg_index; QL_DPRINT7(ha, "[%d]: enter\n \ \t type = 0x%x\n \ \t tpa_agg_index = 0x%x\n \ \t len_list[0] = 0x%x\n \ \t len_list[1] = 0x%x\n \ \t len_list[2] = 0x%x\n \ \t len_list[3] = 0x%x\n \ \t len_list[4] = 0x%x\n \ \t len_list[5] = 0x%x\n", fp->rss_id, cqe->type, cqe->tpa_agg_index, cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); agg_index = cqe->tpa_agg_index; if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); fp->err_rx_tpa_invalid_agg_num++; return; } for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); if (cqe->len_list[i] == 0) break; if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { qlnx_reuse_rx_data(rxq); continue; } sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); mpc = sw_rx_data->data; if (mpc == NULL) { QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); fp->err_rx_mp_null++; if (mpf != NULL) m_freem(mpf); mpf = mpl = NULL; rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); continue; } if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { QL_DPRINT7(ha, "[%d]: New buffer allocation failed," " dropping incoming packet and reusing its" " buffer\n", fp->rss_id); qlnx_reuse_rx_data(rxq); if (mpf != NULL) m_freem(mpf); mpf = mpl = NULL; 
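			/*
			 * As in qlnx_tpa_start(), a failed buffer refill
			 * aborts the aggregation: the partially built
			 * continuation chain was freed above, and the
			 * QLNX_AGG_STATE_ERROR mark set below makes
			 * qlnx_tpa_end() drop the stashed header mbuf as
			 * well rather than deliver a truncated packet.
			 */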
rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); continue; } mpc->m_flags &= ~M_PKTHDR; mpc->m_next = NULL; mpc->m_len = cqe->len_list[i]; if (mpf == NULL) { mpf = mpl = mpc; } else { mpl->m_len = ha->rx_buf_size; mpl->m_next = mpc; mpl = mpc; } ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); } QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", fp->rss_id, mpf, mpl); if (mpf != NULL) { mp = rxq->tpa_info[agg_index].mpl; mp->m_len = ha->rx_buf_size; mp->m_next = mpf; rxq->tpa_info[agg_index].mpl = mpl; } return; } static int qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_rx_queue *rxq, struct eth_fast_path_rx_tpa_end_cqe *cqe) { struct sw_rx_data *sw_rx_data; int i; struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; struct mbuf *mp; uint32_t agg_index; uint32_t len = 0; if_t ifp = ha->ifp; QL_DPRINT7(ha, "[%d]: enter\n \ \t type = 0x%x\n \ \t tpa_agg_index = 0x%x\n \ \t total_packet_len = 0x%x\n \ \t num_of_bds = 0x%x\n \ \t end_reason = 0x%x\n \ \t num_of_coalesced_segs = 0x%x\n \ \t ts_delta = 0x%x\n \ \t len_list[0] = 0x%x\n \ \t len_list[1] = 0x%x\n \ \t len_list[2] = 0x%x\n \ \t len_list[3] = 0x%x\n", fp->rss_id, cqe->type, cqe->tpa_agg_index, cqe->total_packet_len, cqe->num_of_bds, cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], cqe->len_list[3]); agg_index = cqe->tpa_agg_index; if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); fp->err_rx_tpa_invalid_agg_num++; return (0); } for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); if (cqe->len_list[i] == 0) break; if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); qlnx_reuse_rx_data(rxq); continue; } sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); mpc = sw_rx_data->data; if (mpc == NULL) { QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); fp->err_rx_mp_null++; if (mpf != NULL) m_freem(mpf); mpf = mpl = NULL; rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); continue; } if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { QL_DPRINT7(ha, "[%d]: New buffer allocation failed," " dropping incoming packet and reusing its" " buffer\n", fp->rss_id); qlnx_reuse_rx_data(rxq); if (mpf != NULL) m_freem(mpf); mpf = mpl = NULL; rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); continue; } mpc->m_flags &= ~M_PKTHDR; mpc->m_next = NULL; mpc->m_len = cqe->len_list[i]; if (mpf == NULL) { mpf = mpl = mpc; } else { mpl->m_len = ha->rx_buf_size; mpl->m_next = mpc; mpl = mpc; } ecore_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); } QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); if (mpf != NULL) { QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); mp = rxq->tpa_info[agg_index].mpl; mp->m_len = ha->rx_buf_size; mp->m_next = mpf; } if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); if (rxq->tpa_info[agg_index].mpf != NULL) m_freem(rxq->tpa_info[agg_index].mpf); rxq->tpa_info[agg_index].mpf = NULL; 
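		/*
		 * Error teardown (continues below): any stashed header
		 * chain has just been freed, and the slot is reset to
		 * QLNX_AGG_STATE_NONE so a later TPA_START can reuse it
		 * without anything being passed up the stack.
		 */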
rxq->tpa_info[agg_index].mpl = NULL; rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; return (0); } mp = rxq->tpa_info[agg_index].mpf; m_adj(mp, rxq->tpa_info[agg_index].placement_offset); mp->m_pkthdr.len = cqe->total_packet_len; if (mp->m_next == NULL) mp->m_len = mp->m_pkthdr.len; else { /* compute the total packet length */ mpf = mp; while (mpf != NULL) { len += mpf->m_len; mpf = mpf->m_next; } if (cqe->total_packet_len > len) { mpl = rxq->tpa_info[agg_index].mpl; mpl->m_len += (cqe->total_packet_len - len); } } QLNX_INC_IPACKETS(ifp); QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ m_len = 0x%x m_pkthdr_len = 0x%x\n", fp->rss_id, mp->m_pkthdr.csum_data, (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); if_input(ifp, mp); rxq->tpa_info[agg_index].mpf = NULL; rxq->tpa_info[agg_index].mpl = NULL; rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; return (cqe->num_of_coalesced_segs); } static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, int lro_enable) { uint16_t hw_comp_cons, sw_comp_cons; int rx_pkt = 0; struct qlnx_rx_queue *rxq = fp->rxq; if_t ifp = ha->ifp; struct ecore_dev *cdev = &ha->cdev; struct ecore_hwfn *p_hwfn; #ifdef QLNX_SOFT_LRO struct lro_ctrl *lro; lro = &rxq->lro; #endif /* #ifdef QLNX_SOFT_LRO */ hw_comp_cons = le16toh(*rxq->hw_cons_ptr); sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; /* Memory barrier to prevent the CPU from doing speculative reads of CQE * / BD in the while-loop before reading hw_comp_cons. If the CQE is * read before it is written by FW, then FW writes CQE and SB, and then * the CPU reads the hw_comp_cons, it will use an old CQE. 
*/ /* Loop to complete all indicated BDs */ while (sw_comp_cons != hw_comp_cons) { union eth_rx_cqe *cqe; struct eth_fast_path_rx_reg_cqe *fp_cqe; struct sw_rx_data *sw_rx_data; register struct mbuf *mp; enum eth_rx_cqe_type cqe_type; uint16_t len, pad, len_on_first_bd; uint8_t *data; uint8_t hash_type; /* Get the CQE from the completion ring */ cqe = (union eth_rx_cqe *) ecore_chain_consume(&rxq->rx_comp_ring); cqe_type = cqe->fast_path_regular.type; if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { QL_DPRINT3(ha, "Got a slowath CQE\n"); ecore_eth_cqe_completion(p_hwfn, (struct eth_slow_path_rx_cqe *)cqe); goto next_cqe; } if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { switch (cqe_type) { case ETH_RX_CQE_TYPE_TPA_START: qlnx_tpa_start(ha, fp, rxq, &cqe->fast_path_tpa_start); fp->tpa_start++; break; case ETH_RX_CQE_TYPE_TPA_CONT: qlnx_tpa_cont(ha, fp, rxq, &cqe->fast_path_tpa_cont); fp->tpa_cont++; break; case ETH_RX_CQE_TYPE_TPA_END: rx_pkt += qlnx_tpa_end(ha, fp, rxq, &cqe->fast_path_tpa_end); fp->tpa_end++; break; default: break; } goto next_cqe; } /* Get the data from the SW ring */ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; mp = sw_rx_data->data; if (mp == NULL) { QL_DPRINT1(ha, "mp = NULL\n"); fp->err_rx_mp_null++; rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); goto next_cqe; } bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); /* non GRO */ fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ len = le16toh(fp_cqe->pkt_len); pad = fp_cqe->placement_offset; #if 0 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," " len %u, parsing flags = %d pad = %d\n", cqe_type, fp_cqe->bitfields, le16toh(fp_cqe->vlan_tag), len, le16toh(fp_cqe->pars_flags.flags), pad); #endif data = mtod(mp, uint8_t *); data = data + pad; if (0) qlnx_dump_buf8(ha, __func__, data, len); /* For every Rx BD consumed, we allocate a new BD so the BD ring * is always with a fixed size. If allocation fails, we take the * consumed BD and return it to the ring in the PROD position. * The packet that was received on that BD will be dropped (and * not passed to the upper stack). 
*/ /* If this is an error packet then drop it */ if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & CQE_FLAGS_ERR) { QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," " dropping incoming packet\n", sw_comp_cons, le16toh(cqe->fast_path_regular.pars_flags.flags)); fp->err_rx_hw_errors++; qlnx_reuse_rx_data(rxq); QLNX_INC_IERRORS(ifp); goto next_cqe; } if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { QL_DPRINT1(ha, "New buffer allocation failed, dropping" " incoming packet and reusing its buffer\n"); qlnx_reuse_rx_data(rxq); fp->err_rx_alloc_errors++; QLNX_INC_IQDROPS(ifp); goto next_cqe; } ecore_chain_consume(&rxq->rx_bd_ring); len_on_first_bd = fp_cqe->len_on_first_bd; m_adj(mp, pad); mp->m_pkthdr.len = len; if ((len > 60 ) && (len > len_on_first_bd)) { mp->m_len = len_on_first_bd; if (qlnx_rx_jumbo_chain(ha, fp, mp, (len - len_on_first_bd)) != 0) { m_freem(mp); QLNX_INC_IQDROPS(ifp); goto next_cqe; } } else if (len_on_first_bd < len) { fp->err_rx_jumbo_chain_pkts++; } else { mp->m_len = len; } mp->m_flags |= M_PKTHDR; /* assign packet to this interface interface */ mp->m_pkthdr.rcvif = ifp; /* assume no hardware checksum has complated */ mp->m_pkthdr.csum_flags = 0; mp->m_pkthdr.flowid = fp_cqe->rss_hash; hash_type = fp_cqe->bitfields & (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); switch (hash_type) { case RSS_HASH_TYPE_IPV4: M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); break; case RSS_HASH_TYPE_TCP_IPV4: M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); break; case RSS_HASH_TYPE_IPV6: M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); break; case RSS_HASH_TYPE_TCP_IPV6: M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); break; default: M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); break; } if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; } if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { mp->m_pkthdr.csum_data = 0xFFFF; mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); mp->m_flags |= M_VLANTAG; } QLNX_INC_IPACKETS(ifp); QLNX_INC_IBYTES(ifp, len); #ifdef QLNX_SOFT_LRO if (lro_enable) tcp_lro_queue_mbuf(lro, mp); else if_input(ifp, mp); #else if_input(ifp, mp); #endif /* #ifdef QLNX_SOFT_LRO */ rx_pkt++; rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); next_cqe: /* don't consume bd rx buffer */ ecore_chain_recycle_consumed(&rxq->rx_comp_ring); sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); /* CR TPA - revisit how to handle budget in TPA perhaps increase on "end" */ if (rx_pkt == budget) break; } /* repeat while sw_comp_cons != hw_comp_cons... 
*/ /* Update producers */ qlnx_update_rx_prod(p_hwfn, rxq); return rx_pkt; } /* * fast path interrupt */ static void qlnx_fp_isr(void *arg) { qlnx_ivec_t *ivec = arg; qlnx_host_t *ha; struct qlnx_fastpath *fp = NULL; int idx; ha = ivec->ha; if (ha->state != QLNX_STATE_OPEN) { return; } idx = ivec->rss_idx; if ((idx = ivec->rss_idx) >= ha->num_rss) { QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); ha->err_illegal_intr++; return; } fp = &ha->fp_array[idx]; if (fp == NULL) { ha->err_fp_null++; } else { int rx_int = 0; #ifdef QLNX_SOFT_LRO int total_rx_count = 0; #endif int lro_enable, tc; struct qlnx_tx_queue *txq; uint16_t elem_left; lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO; ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); do { for (tc = 0; tc < ha->num_tc; tc++) { txq = fp->txq[tc]; if((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < QLNX_TX_ELEM_THRESH) { if (mtx_trylock(&fp->tx_mtx)) { #ifdef QLNX_TRACE_PERF_DATA tx_compl = fp->tx_pkts_completed; #endif qlnx_tx_int(ha, fp, fp->txq[tc]); #ifdef QLNX_TRACE_PERF_DATA fp->tx_pkts_compl_intr += (fp->tx_pkts_completed - tx_compl); if ((fp->tx_pkts_completed - tx_compl) <= 32) fp->tx_comInt[0]++; else if (((fp->tx_pkts_completed - tx_compl) > 32) && ((fp->tx_pkts_completed - tx_compl) <= 64)) fp->tx_comInt[1]++; else if(((fp->tx_pkts_completed - tx_compl) > 64) && ((fp->tx_pkts_completed - tx_compl) <= 128)) fp->tx_comInt[2]++; else if(((fp->tx_pkts_completed - tx_compl) > 128)) fp->tx_comInt[3]++; #endif mtx_unlock(&fp->tx_mtx); } } } rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable); if (rx_int) { fp->rx_pkts += rx_int; #ifdef QLNX_SOFT_LRO total_rx_count += rx_int; #endif } } while (rx_int); #ifdef QLNX_SOFT_LRO { struct lro_ctrl *lro; lro = &fp->rxq->lro; if (lro_enable && total_rx_count) { #ifdef QLNX_TRACE_LRO_CNT if (lro->lro_mbuf_count & ~1023) fp->lro_cnt_1024++; else if (lro->lro_mbuf_count & ~511) fp->lro_cnt_512++; else if (lro->lro_mbuf_count & ~255) fp->lro_cnt_256++; else if (lro->lro_mbuf_count & ~127) fp->lro_cnt_128++; else if (lro->lro_mbuf_count & ~63) fp->lro_cnt_64++; #endif /* #ifdef QLNX_TRACE_LRO_CNT */ tcp_lro_flush_all(lro); } } #endif /* #ifdef QLNX_SOFT_LRO */ ecore_sb_update_sb_idx(fp->sb_info); rmb(); ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); } return; } /* * slow path interrupt processing function * can be invoked in polled mode or in interrupt mode via taskqueue. 
*/ void qlnx_sp_isr(void *arg) { struct ecore_hwfn *p_hwfn; qlnx_host_t *ha; p_hwfn = arg; ha = (qlnx_host_t *)p_hwfn->p_dev; ha->sp_interrupts++; QL_DPRINT2(ha, "enter\n"); ecore_int_sp_dpc(p_hwfn); QL_DPRINT2(ha, "exit\n"); return; } /***************************************************************************** * Support Functions for DMA'able Memory *****************************************************************************/ static void qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { *((bus_addr_t *)arg) = 0; if (error) { printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); return; } *((bus_addr_t *)arg) = segs[0].ds_addr; return; } static int qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) { int ret = 0; bus_addr_t b_addr; ret = bus_dma_tag_create( ha->parent_tag,/* parent */ dma_buf->alignment, ((bus_size_t)(1ULL << 32)),/* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_buf->size, /* maxsize */ 1, /* nsegments */ dma_buf->size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma_buf->dma_tag); if (ret) { QL_DPRINT1(ha, "could not create dma tag\n"); goto qlnx_alloc_dmabuf_exit; } ret = bus_dmamem_alloc(dma_buf->dma_tag, (void **)&dma_buf->dma_b, (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), &dma_buf->dma_map); if (ret) { bus_dma_tag_destroy(dma_buf->dma_tag); QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); goto qlnx_alloc_dmabuf_exit; } ret = bus_dmamap_load(dma_buf->dma_tag, dma_buf->dma_map, dma_buf->dma_b, dma_buf->size, qlnx_dmamap_callback, &b_addr, BUS_DMA_NOWAIT); if (ret || !b_addr) { bus_dma_tag_destroy(dma_buf->dma_tag); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); ret = -1; goto qlnx_alloc_dmabuf_exit; } dma_buf->dma_addr = b_addr; qlnx_alloc_dmabuf_exit: return ret; } static void qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) { bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); bus_dma_tag_destroy(dma_buf->dma_tag); return; } void * qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) { qlnx_dma_t dma_buf; qlnx_dma_t *dma_p; qlnx_host_t *ha __unused; ha = (qlnx_host_t *)ecore_dev; size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); memset(&dma_buf, 0, sizeof (qlnx_dma_t)); dma_buf.size = size + PAGE_SIZE; dma_buf.alignment = 8; if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) return (NULL); bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); *phys = dma_buf.dma_addr; dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, dma_buf.dma_b, (void *)dma_buf.dma_addr, size); return (dma_buf.dma_b); } void qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, uint32_t size) { qlnx_dma_t dma_buf, *dma_p; qlnx_host_t *ha; ha = (qlnx_host_t *)ecore_dev; if (v_addr == NULL) return; size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", (void *)dma_p->dma_map, (void *)dma_p->dma_tag, dma_p->dma_b, (void *)dma_p->dma_addr, size); dma_buf = *dma_p; if (!ha->qlnxr_debug) qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); return; } static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) { int ret; device_t dev; dev = ha->pci_dev; /* * Allocate parent DMA Tag */ ret = 
bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ha->parent_tag); if (ret) { QL_DPRINT1(ha, "could not create parent dma tag\n"); return (-1); } ha->flags.parent_tag = 1; return (0); } static void qlnx_free_parent_dma_tag(qlnx_host_t *ha) { if (ha->parent_tag != NULL) { bus_dma_tag_destroy(ha->parent_tag); ha->parent_tag = NULL; } return; } static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) { if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ QLNX_MAX_SEGMENTS, /* nsegments */ QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->tx_tag)) { QL_DPRINT1(ha, "tx_tag alloc failed\n"); return (-1); } return (0); } static void qlnx_free_tx_dma_tag(qlnx_host_t *ha) { if (ha->tx_tag != NULL) { bus_dma_tag_destroy(ha->tx_tag); ha->tx_tag = NULL; } return; } static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) { if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->rx_tag)) { QL_DPRINT1(ha, " rx_tag alloc failed\n"); return (-1); } return (0); } static void qlnx_free_rx_dma_tag(qlnx_host_t *ha) { if (ha->rx_tag != NULL) { bus_dma_tag_destroy(ha->rx_tag); ha->rx_tag = NULL; } return; } /********************************* * Exported functions *********************************/ uint32_t qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) { uint32_t bar_size; bar_id = bar_id * 2; bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, SYS_RES_MEMORY, PCIR_BAR(bar_id)); return (bar_size); } uint32_t qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) { *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, pci_reg, 1); return 0; } uint32_t qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, uint16_t *reg_value) { *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, pci_reg, 2); return 0; } uint32_t qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, uint32_t *reg_value) { *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, pci_reg, 4); return 0; } void qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) { pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, pci_reg, reg_value, 1); return; } void qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, uint16_t reg_value) { pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, pci_reg, reg_value, 2); return; } void qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, uint32_t reg_value) { pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, pci_reg, reg_value, 4); return; } int qlnx_pci_find_capability(void *ecore_dev, int cap) { int reg; qlnx_host_t *ha; ha = ecore_dev; if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0) return reg; else { QL_DPRINT1(ha, "failed\n"); return 0; } } int qlnx_pci_find_ext_capability(void *ecore_dev, int
ext_cap) { int reg; qlnx_host_t *ha; ha = ecore_dev; if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0) return reg; else { QL_DPRINT1(ha, "failed\n"); return 0; } } uint32_t qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) { uint32_t data32; struct ecore_hwfn *p_hwfn; p_hwfn = hwfn; data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ (bus_size_t)(p_hwfn->reg_offset + reg_addr)); return (data32); } void qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) { struct ecore_hwfn *p_hwfn = hwfn; bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); return; } void qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) { struct ecore_hwfn *p_hwfn = hwfn; bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); return; } void qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) { struct ecore_dev *cdev; struct ecore_hwfn *p_hwfn; uint32_t offset; p_hwfn = hwfn; cdev = p_hwfn->p_dev; offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value); return; } void qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) { struct ecore_hwfn *p_hwfn = hwfn; bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ (bus_size_t)(p_hwfn->db_offset + reg_addr), value); return; } uint32_t qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) { uint32_t data32; bus_size_t offset; struct ecore_dev *cdev; cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); return (data32); } void qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) { bus_size_t offset; struct ecore_dev *cdev; cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); return; } void qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) { bus_size_t offset; struct ecore_dev *cdev; cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); return; } void * qlnx_zalloc(uint32_t size) { caddr_t va; va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); bzero(va, size); return ((void *)va); } void -qlnx_barrier(void *p_hwfn) +qlnx_barrier(void *p_dev) { qlnx_host_t *ha; - ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; + ha = ((struct ecore_dev *) p_dev)->ha; bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); } void qlnx_link_update(void *p_hwfn) { qlnx_host_t *ha; int prev_link_state; ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; qlnx_fill_link(ha, p_hwfn, &ha->if_link); prev_link_state = ha->link_up; ha->link_up = ha->if_link.link_up; if (prev_link_state != ha->link_up) { if (ha->link_up) { if_link_state_change(ha->ifp, LINK_STATE_UP); } else { if_link_state_change(ha->ifp, LINK_STATE_DOWN); } } #ifndef QLNX_VF #ifdef CONFIG_ECORE_SRIOV if (qlnx_vf_device(ha) != 0) { if (ha->sriov_initialized) qlnx_inform_vf_link_state(p_hwfn, ha); } #endif /* #ifdef CONFIG_ECORE_SRIOV */ #endif /* #ifdef QLNX_VF */ return; } static void __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, struct ecore_vf_acquire_sw_info *p_sw_info) { p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | (QLNX_VERSION_MINOR << 16) |
QLNX_VERSION_BUILD; p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; return; } void qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, void *p_sw_info) { __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); return; } void qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link) { struct ecore_mcp_link_params link_params; struct ecore_mcp_link_state link_state; uint8_t p_change; struct ecore_ptt *p_ptt = NULL; memset(if_link, 0, sizeof(*if_link)); memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); ha = (qlnx_host_t *)hwfn->p_dev; /* Prepare source inputs */ /* we only deal with physical functions */ if (qlnx_vf_device(ha) != 0) { p_ptt = ecore_ptt_acquire(hwfn); if (p_ptt == NULL) { QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); return; } ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); ecore_ptt_release(hwfn, p_ptt); memcpy(&link_params, ecore_mcp_get_link_params(hwfn), sizeof(link_params)); memcpy(&link_state, ecore_mcp_get_link_state(hwfn), sizeof(link_state)); } else { ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); ecore_vf_read_bulletin(hwfn, &p_change); ecore_vf_get_link_params(hwfn, &link_params); ecore_vf_get_link_state(hwfn, &link_state); } /* Set the link parameters to pass to protocol driver */ if (link_state.link_up) { if_link->link_up = true; if_link->speed = link_state.speed; } if_link->supported_caps = QLNX_LINK_CAP_FIBRE; if (link_params.speed.autoneg) if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; if (link_params.pause.autoneg || (link_params.pause.forced_rx && link_params.pause.forced_tx)) if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; if (link_params.pause.autoneg || link_params.pause.forced_rx || link_params.pause.forced_tx) if_link->supported_caps |= QLNX_LINK_CAP_Pause; if (link_params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | QLNX_LINK_CAP_1000baseT_Full; if (link_params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; if (link_params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; if (link_params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_LINK_SPEED_40G) if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; if (link_params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; if (link_params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; if_link->advertised_caps = if_link->supported_caps; if_link->autoneg = link_params.speed.autoneg; if_link->duplex = QLNX_LINK_DUPLEX; /* Link partner capabilities */ if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) if_link->link_partner_caps |= 
QLNX_LINK_CAP_40000baseLR4_Full; if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full; if (link_state.an_complete) if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; if (link_state.partner_adv_pause) if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; if ((link_state.partner_adv_pause == ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || (link_state.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE)) if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; return; } void qlnx_schedule_recovery(void *p_hwfn) { qlnx_host_t *ha; ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; if (qlnx_vf_device(ha) != 0) { taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); } return; } static int qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) { int rc, i; for (i = 0; i < cdev->num_hwfns; i++) { struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; p_hwfn->pf_params = *func_params; #ifdef QLNX_ENABLE_IWARP if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { p_hwfn->using_ll2 = true; } #endif /* #ifdef QLNX_ENABLE_IWARP */ } rc = ecore_resc_alloc(cdev); if (rc) goto qlnx_nic_setup_exit; ecore_resc_setup(cdev); qlnx_nic_setup_exit: return rc; } static int qlnx_nic_start(struct ecore_dev *cdev) { int rc; struct ecore_hw_init_params params; bzero(&params, sizeof (struct ecore_hw_init_params)); params.p_tunn = NULL; params.b_hw_start = true; params.int_mode = cdev->int_mode; params.allow_npar_tx_switch = true; params.bin_fw_data = NULL; rc = ecore_hw_init(cdev, &params); if (rc) { ecore_resc_free(cdev); return rc; } return 0; } static int qlnx_slowpath_start(qlnx_host_t *ha) { struct ecore_dev *cdev; struct ecore_pf_params pf_params; int rc; memset(&pf_params, 0, sizeof(struct ecore_pf_params)); pf_params.eth_pf_params.num_cons = (ha->num_rss) * (ha->num_tc + 1); #ifdef QLNX_ENABLE_IWARP if (qlnx_vf_device(ha) != 0) { if(ha->personality == ECORE_PCI_ETH_IWARP) { device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); pf_params.rdma_pf_params.num_qps = 1024; pf_params.rdma_pf_params.num_srqs = 1024; pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; } else if(ha->personality == ECORE_PCI_ETH_ROCE) { device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); pf_params.rdma_pf_params.num_qps = 8192; pf_params.rdma_pf_params.num_srqs = 8192; //pf_params.rdma_pf_params.min_dpis = 0; pf_params.rdma_pf_params.min_dpis = 8; pf_params.rdma_pf_params.roce_edpm_mode = 0; pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; } } #endif /* #ifdef QLNX_ENABLE_IWARP */ cdev = &ha->cdev; rc = qlnx_nic_setup(cdev, &pf_params); if (rc) goto qlnx_slowpath_start_exit; cdev->int_mode = ECORE_INT_MODE_MSIX; cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; #ifdef QLNX_MAX_COALESCE cdev->rx_coalesce_usecs = 255; cdev->tx_coalesce_usecs = 255; #endif rc = qlnx_nic_start(cdev); ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; #ifdef QLNX_USER_LLDP (void)qlnx_set_lldp_tlvx(ha, NULL); #endif /* #ifdef QLNX_USER_LLDP */ qlnx_slowpath_start_exit: return (rc); } static int qlnx_slowpath_stop(qlnx_host_t *ha) { struct ecore_dev *cdev; device_t dev = ha->pci_dev; int i; cdev = &ha->cdev; ecore_hw_stop(cdev);
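	/*
	 * Teardown order (describes the loop that follows): the hardware is
	 * stopped first via ecore_hw_stop(), then each hwfn's slowpath
	 * interrupt handler and IRQ resource are released with
	 * bus_teardown_intr() / bus_release_resource(), and finally
	 * ecore_resc_free() returns the ecore resources.  Stopping the
	 * hardware before releasing the IRQs keeps the slowpath handlers
	 * registered while ecore_hw_stop() runs.
	 */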
for (i = 0; i < ha->cdev.num_hwfns; i++) { if (ha->sp_handle[i]) (void)bus_teardown_intr(dev, ha->sp_irq[i], ha->sp_handle[i]); ha->sp_handle[i] = NULL; if (ha->sp_irq[i]) (void) bus_release_resource(dev, SYS_RES_IRQ, ha->sp_irq_rid[i], ha->sp_irq[i]); ha->sp_irq[i] = NULL; } ecore_resc_free(cdev); return 0; } static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], char ver_str[VER_SIZE]) { int i; memcpy(cdev->name, name, NAME_SIZE); for_each_hwfn(cdev, i) { snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); } cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; return ; } void qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) { enum ecore_mcp_protocol_type type; union ecore_mcp_protocol_stats *stats; struct ecore_eth_stats eth_stats; qlnx_host_t *ha; ha = cdev; stats = proto_stats; type = proto_type; switch (type) { case ECORE_MCP_LAN_STATS: ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats); stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; stats->lan_stats.fcs_err = -1; break; default: ha->err_get_proto_invalid_type++; QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); break; } return; } static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) { struct ecore_hwfn *p_hwfn; struct ecore_ptt *p_ptt; p_hwfn = &ha->cdev.hwfns[0]; p_ptt = ecore_ptt_acquire(p_hwfn); if (p_ptt == NULL) { QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); return (-1); } ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); ecore_ptt_release(p_hwfn, p_ptt); return (0); } static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) { struct ecore_hwfn *p_hwfn; struct ecore_ptt *p_ptt; p_hwfn = &ha->cdev.hwfns[0]; p_ptt = ecore_ptt_acquire(p_hwfn); if (p_ptt == NULL) { QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); return (-1); } ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); ecore_ptt_release(p_hwfn, p_ptt); return (0); } static int qlnx_alloc_mem_arrays(qlnx_host_t *ha) { bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); return 0; } static void qlnx_init_fp(qlnx_host_t *ha) { int rss_id, txq_array_index, tc; for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; fp->rss_id = rss_id; fp->edev = ha; fp->sb_info = &ha->sb_array[rss_id]; fp->rxq = &ha->rxq_array[rss_id]; fp->rxq->rxq_id = rss_id; for (tc = 0; tc < ha->num_tc; tc++) { txq_array_index = tc * ha->num_rss + rss_id; fp->txq[tc] = &ha->txq_array[txq_array_index]; fp->txq[tc]->index = txq_array_index; } snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, rss_id); fp->tx_ring_full = 0; /* reset all the statistics counters */ fp->tx_pkts_processed = 0; fp->tx_pkts_freed = 0; fp->tx_pkts_transmitted = 0; fp->tx_pkts_completed = 0; #ifdef QLNX_TRACE_PERF_DATA fp->tx_pkts_trans_ctx = 0; fp->tx_pkts_compl_ctx = 0; fp->tx_pkts_trans_fp = 0; fp->tx_pkts_compl_fp = 0; fp->tx_pkts_compl_intr = 0; #endif fp->tx_lso_wnd_min_len = 0; fp->tx_defrag = 0; fp->tx_nsegs_gt_elem_left = 0; fp->tx_tso_max_nsegs = 0; fp->tx_tso_min_nsegs = 0; fp->err_tx_nsegs_gt_elem_left = 0; fp->err_tx_dmamap_create = 0; fp->err_tx_defrag_dmamap_load = 0; fp->err_tx_non_tso_max_seg = 0; fp->err_tx_dmamap_load = 0; fp->err_tx_defrag = 0; fp->err_tx_free_pkt_null = 0; fp->err_tx_cons_idx_conflict = 0; fp->rx_pkts = 0; fp->err_m_getcl =
0; fp->err_m_getjcl = 0; } return; } void qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) { struct ecore_dev *cdev; cdev = &ha->cdev; if (sb_info->sb_virt) { OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); sb_info->sb_virt = NULL; } } static int qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) { struct ecore_hwfn *p_hwfn; int hwfn_index, rc; u16 rel_sb_id; hwfn_index = sb_id % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; rel_sb_id = sb_id / cdev->num_hwfns; QL_DPRINT2(((qlnx_host_t *)cdev), "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, sb_virt_addr, (void *)sb_phy_addr); rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, sb_virt_addr, sb_phy_addr, rel_sb_id); return rc; } /* This function allocates fast-path status block memory */ int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) { struct status_block_e4 *sb_virt; bus_addr_t sb_phys; int rc; uint32_t size; struct ecore_dev *cdev; cdev = &ha->cdev; size = sizeof(*sb_virt); sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); if (!sb_virt) { QL_DPRINT1(ha, "Status block allocation failed\n"); return -ENOMEM; } rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); if (rc) { OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); } return rc; } static void qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) { int i; struct sw_rx_data *rx_buf; for (i = 0; i < rxq->num_rx_buffers; i++) { rx_buf = &rxq->sw_rx_ring[i]; if (rx_buf->data != NULL) { if (rx_buf->map != NULL) { bus_dmamap_unload(ha->rx_tag, rx_buf->map); bus_dmamap_destroy(ha->rx_tag, rx_buf->map); rx_buf->map = NULL; } m_freem(rx_buf->data); rx_buf->data = NULL; } } return; } static void qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) { struct ecore_dev *cdev; int i; cdev = &ha->cdev; qlnx_free_rx_buffers(ha, rxq); for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); if (rxq->tpa_info[i].mpf != NULL) m_freem(rxq->tpa_info[i].mpf); } bzero((void *)&rxq->sw_rx_ring[0], (sizeof (struct sw_rx_data) * RX_RING_SIZE)); /* Free the real RQ ring used by FW */ if (rxq->rx_bd_ring.p_virt_addr) { ecore_chain_free(cdev, &rxq->rx_bd_ring); rxq->rx_bd_ring.p_virt_addr = NULL; } /* Free the real completion ring used by FW */ if (rxq->rx_comp_ring.p_virt_addr && rxq->rx_comp_ring.pbl_sp.p_virt_table) { ecore_chain_free(cdev, &rxq->rx_comp_ring); rxq->rx_comp_ring.p_virt_addr = NULL; rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; } #ifdef QLNX_SOFT_LRO { struct lro_ctrl *lro; lro = &rxq->lro; tcp_lro_free(lro); } #endif /* #ifdef QLNX_SOFT_LRO */ return; } static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) { register struct mbuf *mp; uint16_t rx_buf_size; struct sw_rx_data *sw_rx_data; struct eth_rx_bd *rx_bd; dma_addr_t dma_addr; bus_dmamap_t map; bus_dma_segment_t segs[1]; int nsegs; int ret; rx_buf_size = rxq->rx_buf_size; mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); if (mp == NULL) { QL_DPRINT1(ha, "Failed to allocate Rx data\n"); return -ENOMEM; } mp->m_len = mp->m_pkthdr.len = rx_buf_size; map = (bus_dmamap_t)0; ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, BUS_DMA_NOWAIT); dma_addr = segs[0].ds_addr; if (ret || !dma_addr || (nsegs != 1)) { m_freem(mp); QL_DPRINT1(ha, 
"bus_dmamap_load failed[%d, 0x%016llx, %d]\n", ret, (long long unsigned int)dma_addr, nsegs); return -ENOMEM; } sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; sw_rx_data->data = mp; sw_rx_data->dma_addr = dma_addr; sw_rx_data->map = map; /* Advance PROD and get BD pointer */ rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); rx_bd->addr.hi = htole32(U64_HI(dma_addr)); rx_bd->addr.lo = htole32(U64_LO(dma_addr)); bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); return 0; } static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, struct qlnx_agg_info *tpa) { struct mbuf *mp; dma_addr_t dma_addr; bus_dmamap_t map; bus_dma_segment_t segs[1]; int nsegs; int ret; struct sw_rx_data *rx_buf; mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); if (mp == NULL) { QL_DPRINT1(ha, "Failed to allocate Rx data\n"); return -ENOMEM; } mp->m_len = mp->m_pkthdr.len = rx_buf_size; map = (bus_dmamap_t)0; ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, BUS_DMA_NOWAIT); dma_addr = segs[0].ds_addr; if (ret || !dma_addr || (nsegs != 1)) { m_freem(mp); QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", ret, (long long unsigned int)dma_addr, nsegs); return -ENOMEM; } rx_buf = &tpa->rx_buf; memset(rx_buf, 0, sizeof (struct sw_rx_data)); rx_buf->data = mp; rx_buf->dma_addr = dma_addr; rx_buf->map = map; bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); return (0); } static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) { struct sw_rx_data *rx_buf; rx_buf = &tpa->rx_buf; if (rx_buf->data != NULL) { if (rx_buf->map != NULL) { bus_dmamap_unload(ha->rx_tag, rx_buf->map); bus_dmamap_destroy(ha->rx_tag, rx_buf->map); rx_buf->map = NULL; } m_freem(rx_buf->data); rx_buf->data = NULL; } return; } /* This function allocates all memory needed per Rx queue */ static int qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) { int i, rc, num_allocated; struct ecore_dev *cdev; cdev = &ha->cdev; rxq->num_rx_buffers = RX_RING_SIZE; rxq->rx_buf_size = ha->rx_buf_size; /* Allocate the parallel driver ring for Rx buffers */ bzero((void *)&rxq->sw_rx_ring[0], (sizeof (struct sw_rx_data) * RX_RING_SIZE)); /* Allocate FW Rx ring */ rc = ecore_chain_alloc(cdev, ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, ECORE_CHAIN_MODE_NEXT_PTR, ECORE_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(struct eth_rx_bd), &rxq->rx_bd_ring, NULL); if (rc) goto err; /* Allocate FW completion ring */ rc = ecore_chain_alloc(cdev, ECORE_CHAIN_USE_TO_CONSUME, ECORE_CHAIN_MODE_PBL, ECORE_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(union eth_rx_cqe), &rxq->rx_comp_ring, NULL); if (rc) goto err; /* Allocate buffers for the Rx ring */ for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, &rxq->tpa_info[i]); if (rc) break; } for (i = 0; i < rxq->num_rx_buffers; i++) { rc = qlnx_alloc_rx_buffer(ha, rxq); if (rc) break; } num_allocated = i; if (!num_allocated) { QL_DPRINT1(ha, "Rx buffers allocation failed\n"); goto err; } else if (num_allocated < rxq->num_rx_buffers) { QL_DPRINT1(ha, "Allocated less buffers than" " desired (%d allocated)\n", num_allocated); } #ifdef QLNX_SOFT_LRO { struct lro_ctrl *lro; lro = &rxq->lro; if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", rxq->rxq_id); goto err; } lro->ifp = ha->ifp; } #endif /* #ifdef QLNX_SOFT_LRO */ return 0; err: qlnx_free_mem_rxq(ha, rxq); return -ENOMEM; } static void 
qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_tx_queue *txq) { struct ecore_dev *cdev; cdev = &ha->cdev; bzero((void *)&txq->sw_tx_ring[0], (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); /* Free the real RQ ring used by FW */ if (txq->tx_pbl.p_virt_addr) { ecore_chain_free(cdev, &txq->tx_pbl); txq->tx_pbl.p_virt_addr = NULL; } return; } /* This function allocates all memory needed per Tx queue */ static int qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_tx_queue *txq) { int ret = ECORE_SUCCESS; union eth_tx_bd_types *p_virt; struct ecore_dev *cdev; cdev = &ha->cdev; bzero((void *)&txq->sw_tx_ring[0], (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); /* Allocate the real Tx ring to be used by FW */ ret = ecore_chain_alloc(cdev, ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, ECORE_CHAIN_MODE_PBL, ECORE_CHAIN_CNT_TYPE_U16, TX_RING_SIZE, sizeof(*p_virt), &txq->tx_pbl, NULL); if (ret != ECORE_SUCCESS) { goto err; } txq->num_tx_buffers = TX_RING_SIZE; return 0; err: qlnx_free_mem_txq(ha, fp, txq); return -ENOMEM; } static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) { struct mbuf *mp; if_t ifp = ha->ifp; if (mtx_initialized(&fp->tx_mtx)) { if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { fp->tx_pkts_freed++; m_freem(mp); } mtx_unlock(&fp->tx_mtx); buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } mtx_destroy(&fp->tx_mtx); } return; } static void qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) { int tc; qlnx_free_mem_sb(ha, fp->sb_info); qlnx_free_mem_rxq(ha, fp->rxq); for (tc = 0; tc < ha->num_tc; tc++) qlnx_free_mem_txq(ha, fp, fp->txq[tc]); return; } static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) { snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if (fp->tx_br == NULL) { QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", ha->dev_unit, fp->rss_id); return -ENOMEM; } return 0; } static int qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) { int rc, tc; rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); if (rc) goto err; if (ha->rx_jumbo_buf_eq_mtu) { if (ha->max_frame_size <= MCLBYTES) ha->rx_buf_size = MCLBYTES; else if (ha->max_frame_size <= MJUMPAGESIZE) ha->rx_buf_size = MJUMPAGESIZE; else if (ha->max_frame_size <= MJUM9BYTES) ha->rx_buf_size = MJUM9BYTES; else if (ha->max_frame_size <= MJUM16BYTES) ha->rx_buf_size = MJUM16BYTES; } else { if (ha->max_frame_size <= MCLBYTES) ha->rx_buf_size = MCLBYTES; else ha->rx_buf_size = MJUMPAGESIZE; } rc = qlnx_alloc_mem_rxq(ha, fp->rxq); if (rc) goto err; for (tc = 0; tc < ha->num_tc; tc++) { rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); if (rc) goto err; } return 0; err: qlnx_free_mem_fp(ha, fp); return -ENOMEM; } static void qlnx_free_mem_load(qlnx_host_t *ha) { int i; for (i = 0; i < ha->num_rss; i++) { struct qlnx_fastpath *fp = &ha->fp_array[i]; qlnx_free_mem_fp(ha, fp); } return; } static int qlnx_alloc_mem_load(qlnx_host_t *ha) { int rc = 0, rss_id; for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; rc = qlnx_alloc_mem_fp(ha, fp); if (rc) break; } return (rc); } static int qlnx_start_vport(struct ecore_dev *cdev, u8 vport_id, u16 mtu, u8 drop_ttl0_flg, u8 inner_vlan_removal_en_flg, u8 tx_switching, u8 hw_lro_enable) { int rc, i; struct 
ecore_sp_vport_start_params vport_start_params = { 0 }; qlnx_host_t *ha __unused; ha = (qlnx_host_t *)cdev; vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; vport_start_params.tx_switching = 0; vport_start_params.handle_ptp_pkts = 0; vport_start_params.only_untagged = 0; vport_start_params.drop_ttl0 = drop_ttl0_flg; vport_start_params.tpa_mode = (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; vport_start_params.vport_id = vport_id; vport_start_params.mtu = mtu; QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); for_each_hwfn(cdev, i) { struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); if (rc) { QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" " with MTU %d\n" , vport_id, mtu); return -ENOMEM; } ecore_hw_start_fastpath(p_hwfn); QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", vport_id, mtu); } return 0; } static int qlnx_update_vport(struct ecore_dev *cdev, struct qlnx_update_vport_params *params) { struct ecore_sp_vport_update_params sp_params; int rc, i, j, fp_index; struct ecore_hwfn *p_hwfn; struct ecore_rss_params *rss; qlnx_host_t *ha = (qlnx_host_t *)cdev; struct qlnx_fastpath *fp; memset(&sp_params, 0, sizeof(sp_params)); /* Translate protocol params into sp params */ sp_params.vport_id = params->vport_id; sp_params.update_vport_active_rx_flg = params->update_vport_active_rx_flg; sp_params.vport_active_rx_flg = params->vport_active_rx_flg; sp_params.update_vport_active_tx_flg = params->update_vport_active_tx_flg; sp_params.vport_active_tx_flg = params->vport_active_tx_flg; sp_params.update_inner_vlan_removal_flg = params->update_inner_vlan_removal_flg; sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; sp_params.sge_tpa_params = params->sge_tpa_params; /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. * We need to re-fix the rss values per engine for CMT. 
*/ if (params->rss_params->update_rss_config) sp_params.rss_params = params->rss_params; else sp_params.rss_params = NULL; for_each_hwfn(cdev, i) { p_hwfn = &cdev->hwfns[i]; if ((cdev->num_hwfns > 1) && params->rss_params->update_rss_config && params->rss_params->rss_enable) { rss = params->rss_params; for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { fp_index = ((cdev->num_hwfns * j) + i) % ha->num_rss; fp = &ha->fp_array[fp_index]; rss->rss_ind_table[j] = fp->rxq->handle; } for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", rss->rss_ind_table[j], rss->rss_ind_table[j+1], rss->rss_ind_table[j+2], rss->rss_ind_table[j+3], rss->rss_ind_table[j+4], rss->rss_ind_table[j+5], rss->rss_ind_table[j+6], rss->rss_ind_table[j+7]); j += 8; } } sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); rc = ecore_sp_vport_update(p_hwfn, &sp_params, ECORE_SPQ_MODE_EBLOCK, NULL); if (rc) { QL_DPRINT1(ha, "Failed to update VPORT\n"); return rc; } QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ rx_active_flag %d [tx_update %d], [rx_update %d]\n", params->vport_id, params->vport_active_tx_flg, params->vport_active_rx_flg, params->update_vport_active_tx_flg, params->update_vport_active_rx_flg); } return 0; } static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) { struct eth_rx_bd *rx_bd_cons = ecore_chain_consume(&rxq->rx_bd_ring); struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring); struct sw_rx_data *sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons]; struct sw_rx_data *sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod]; sw_rx_data_prod->data = sw_rx_data_cons->data; memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); return; } static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) { uint16_t bd_prod; uint16_t cqe_prod; union { struct eth_rx_prod_data rx_prod_data; uint32_t data32; } rx_prods; bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); /* Update producers */ rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); /* Make sure that the BD and SGE data is updated before updating the * producers since FW might read the BD/SGE right after the producer * is updated. */ wmb(); #ifdef ECORE_CONFIG_DIRECT_HWFN internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, sizeof(rx_prods), &rx_prods.data32); #else internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), &rx_prods.data32); #endif /* mmiowb is needed to synchronize doorbell writes from more than one * processor. It guarantees that the write arrives to the device before * the napi lock is released and another qlnx_poll is called (possibly * on another CPU). Without this barrier, the next doorbell can bypass * this doorbell. This is applicable to IA64/Altix systems. 
*/ wmb(); return; } static uint32_t qlnx_hash_key[] = { ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; static int qlnx_start_queues(qlnx_host_t *ha) { int rc, tc, i, vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1, tx_switching = 0, hw_lro_enable = 0; struct ecore_dev *cdev = &ha->cdev; struct ecore_rss_params *rss_params = &ha->rss_params; struct qlnx_update_vport_params vport_update_params; if_t ifp; struct ecore_hwfn *p_hwfn; struct ecore_sge_tpa_params tpa_params; struct ecore_queue_start_common_params qparams; struct qlnx_fastpath *fp; ifp = ha->ifp; QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); if (!ha->num_rss) { QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" " are no Rx queues\n"); return -EINVAL; } #ifndef QLNX_SOFT_LRO hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO; #endif /* #ifndef QLNX_SOFT_LRO */ rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg, vlan_removal_en, tx_switching, hw_lro_enable); if (rc) { QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); return rc; } QL_DPRINT2(ha, "Start vport ramrod passed, " "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en); for_each_rss(i) { struct ecore_rxq_start_ret_params rx_ret_params; struct ecore_txq_start_ret_params tx_ret_params; fp = &ha->fp_array[i]; p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); bzero(&rx_ret_params, sizeof (struct ecore_rxq_start_ret_params)); qparams.queue_id = i ; qparams.vport_id = vport_id; qparams.stats_id = vport_id; qparams.p_sb = fp->sb_info; qparams.sb_idx = RX_PI; rc = ecore_eth_rx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid, &qparams, fp->rxq->rx_buf_size, /* bd_max_bytes */ /* bd_chain_phys_addr */ fp->rxq->rx_bd_ring.p_phys_addr, /* cqe_pbl_addr */ ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), /* cqe_pbl_size */ ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), &rx_ret_params); if (rc) { QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); return rc; } fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; fp->rxq->handle = rx_ret_params.p_handle; fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; qlnx_update_rx_prod(p_hwfn, fp->rxq); for (tc = 0; tc < ha->num_tc; tc++) { struct qlnx_tx_queue *txq = fp->txq[tc]; bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); bzero(&tx_ret_params, sizeof (struct ecore_txq_start_ret_params)); qparams.queue_id = txq->index / cdev->num_hwfns ; qparams.vport_id = vport_id; qparams.stats_id = vport_id; qparams.p_sb = fp->sb_info; qparams.sb_idx = TX_PI(tc); rc = ecore_eth_tx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid, &qparams, tc, /* bd_chain_phys_addr */ ecore_chain_get_pbl_phys(&txq->tx_pbl), ecore_chain_get_page_cnt(&txq->tx_pbl), &tx_ret_params); if (rc) { QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", txq->index, rc); return rc; } txq->doorbell_addr = tx_ret_params.p_doorbell; txq->handle = tx_ret_params.p_handle; txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM); 
SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL, DQ_XCM_ETH_TX_BD_PROD_CMD); txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; } } /* Fill struct with RSS params */ if (ha->num_rss > 1) { rss_params->update_rss_config = 1; rss_params->rss_enable = 1; rss_params->update_rss_capabilities = 1; rss_params->update_rss_ind_table = 1; rss_params->update_rss_key = 1; rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { fp = &ha->fp_array[(i % ha->num_rss)]; rss_params->rss_ind_table[i] = fp->rxq->handle; } for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; } else { memset(rss_params, 0, sizeof(*rss_params)); } /* Prepare and send the vport enable */ memset(&vport_update_params, 0, sizeof(vport_update_params)); vport_update_params.vport_id = vport_id; vport_update_params.update_vport_active_tx_flg = 1; vport_update_params.vport_active_tx_flg = 1; vport_update_params.update_vport_active_rx_flg = 1; vport_update_params.vport_active_rx_flg = 1; vport_update_params.rss_params = rss_params; vport_update_params.update_inner_vlan_removal_flg = 1; vport_update_params.inner_vlan_removal_flg = 1; if (hw_lro_enable) { memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; tpa_params.update_tpa_en_flg = 1; tpa_params.tpa_ipv4_en_flg = 1; tpa_params.tpa_ipv6_en_flg = 1; tpa_params.update_tpa_param_flg = 1; tpa_params.tpa_pkt_split_flg = 0; tpa_params.tpa_hdr_data_split_flg = 0; tpa_params.tpa_gro_consistent_flg = 0; tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; tpa_params.tpa_max_size = (uint16_t)(-1); tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2; tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2; vport_update_params.sge_tpa_params = &tpa_params; } rc = qlnx_update_vport(cdev, &vport_update_params); if (rc) { QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); return rc; } return 0; } static int qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct qlnx_tx_queue *txq) { uint16_t hw_bd_cons; uint16_t ecore_cons_idx; QL_DPRINT2(ha, "enter\n"); hw_bd_cons = le16toh(*txq->hw_cons_ptr); while (hw_bd_cons != (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { mtx_lock(&fp->tx_mtx); (void)qlnx_tx_int(ha, fp, txq); mtx_unlock(&fp->tx_mtx); qlnx_mdelay(__func__, 2); hw_bd_cons = le16toh(*txq->hw_cons_ptr); } QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); return 0; } static int qlnx_stop_queues(qlnx_host_t *ha) { struct qlnx_update_vport_params vport_update_params; struct ecore_dev *cdev; struct qlnx_fastpath *fp; int rc, tc, i; cdev = &ha->cdev; /* Disable the vport */ memset(&vport_update_params, 0, sizeof(vport_update_params)); vport_update_params.vport_id = 0; vport_update_params.update_vport_active_tx_flg = 1; vport_update_params.vport_active_tx_flg = 0; vport_update_params.update_vport_active_rx_flg = 1; vport_update_params.vport_active_rx_flg = 0; vport_update_params.rss_params = &ha->rss_params; vport_update_params.rss_params->update_rss_config = 0; vport_update_params.rss_params->rss_enable = 0; vport_update_params.update_inner_vlan_removal_flg = 0; vport_update_params.inner_vlan_removal_flg = 0; QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); rc = qlnx_update_vport(cdev, &vport_update_params); if (rc) { 
QL_DPRINT1(ha, "Failed to update vport\n"); return rc; } /* Flush Tx queues. If needed, request drain from MCP */ for_each_rss(i) { fp = &ha->fp_array[i]; for (tc = 0; tc < ha->num_tc; tc++) { struct qlnx_tx_queue *txq = fp->txq[tc]; rc = qlnx_drain_txq(ha, fp, txq); if (rc) return rc; } } /* Stop all Queues in reverse order*/ for (i = ha->num_rss - 1; i >= 0; i--) { struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; fp = &ha->fp_array[i]; /* Stop the Tx Queue(s)*/ for (tc = 0; tc < ha->num_tc; tc++) { int tx_queue_id __unused; tx_queue_id = tc * ha->num_rss + i; rc = ecore_eth_tx_queue_stop(p_hwfn, fp->txq[tc]->handle); if (rc) { QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", tx_queue_id); return rc; } } /* Stop the Rx Queue*/ rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, false); if (rc) { QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); return rc; } } /* Stop the vport */ for_each_hwfn(cdev, i) { struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); if (rc) { QL_DPRINT1(ha, "Failed to stop VPORT\n"); return rc; } } return rc; } static int qlnx_set_ucast_rx_mac(qlnx_host_t *ha, enum ecore_filter_opcode opcode, unsigned char mac[ETH_ALEN]) { struct ecore_filter_ucast ucast; struct ecore_dev *cdev; int rc; cdev = &ha->cdev; bzero(&ucast, sizeof(struct ecore_filter_ucast)); ucast.opcode = opcode; ucast.type = ECORE_FILTER_MAC; ucast.is_rx_filter = 1; ucast.vport_to_add_to = 0; memcpy(&ucast.mac[0], mac, ETH_ALEN); rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); return (rc); } static int qlnx_remove_all_ucast_mac(qlnx_host_t *ha) { struct ecore_filter_ucast ucast; struct ecore_dev *cdev; int rc; bzero(&ucast, sizeof(struct ecore_filter_ucast)); ucast.opcode = ECORE_FILTER_REPLACE; ucast.type = ECORE_FILTER_MAC; ucast.is_rx_filter = 1; cdev = &ha->cdev; rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); return (rc); } static int qlnx_remove_all_mcast_mac(qlnx_host_t *ha) { struct ecore_filter_mcast *mcast; struct ecore_dev *cdev; int rc, i; cdev = &ha->cdev; mcast = &ha->ecore_mcast; bzero(mcast, sizeof(struct ecore_filter_mcast)); mcast->opcode = ECORE_FILTER_REMOVE; for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); mcast->num_mc_addrs++; } } mcast = &ha->ecore_mcast; rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); ha->nmcast = 0; return (rc); } static int qlnx_clean_filters(qlnx_host_t *ha) { int rc = 0; /* Remove all unicast macs */ rc = qlnx_remove_all_ucast_mac(ha); if (rc) return rc; /* Remove all multicast macs */ rc = qlnx_remove_all_mcast_mac(ha); if (rc) return rc; rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); return (rc); } static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) { struct ecore_filter_accept_flags accept; int rc = 0; struct ecore_dev *cdev; cdev = &ha->cdev; bzero(&accept, sizeof(struct ecore_filter_accept_flags)); accept.update_rx_mode_config = 1; accept.rx_accept_filter = filter; accept.update_tx_mode_config = 1; accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, ECORE_SPQ_MODE_CB, NULL); return (rc); } 
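Illustrative sketch, not part of this change: the accept-filter helper above takes a caller-built bitmask, so forcing promiscuous reception amounts to OR-ing the "unmatched" bits onto the default matched-unicast/multicast/broadcast set. qlnx_set_promisc_rx below is a hypothetical wrapper that only uses the ECORE_ACCEPT_* flags and the ha->filter field already present in qlnx_os.c; qlnx_set_rx_mode() that follows applies the same idea conditionally based on the interface flags.

/*
 * Hypothetical helper, illustration only: force promiscuous Rx by adding
 * the "unmatched" accept bits on top of the matched ucast/mcast/bcast set
 * that the driver programs by default, then reprogram the Rx mode through
 * qlnx_set_rx_accept_filter() above.
 */
static int
qlnx_set_promisc_rx(qlnx_host_t *ha)
{
	uint8_t filter;

	filter = ECORE_ACCEPT_UCAST_MATCHED | ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;
	/* also accept frames whose DA matches no configured filter */
	filter |= ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED;

	ha->filter = filter;
	return (qlnx_set_rx_accept_filter(ha, filter));
}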
static int qlnx_set_rx_mode(qlnx_host_t *ha) { int rc = 0; uint8_t filter; rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); if (rc) return rc; rc = qlnx_remove_all_mcast_mac(ha); if (rc) return rc; filter = ECORE_ACCEPT_UCAST_MATCHED | ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; if (qlnx_vf_device(ha) == 0 || (ha->ifp->if_flags & IFF_PROMISC)) { filter |= ECORE_ACCEPT_UCAST_UNMATCHED; filter |= ECORE_ACCEPT_MCAST_UNMATCHED; } else if (ha->ifp->if_flags & IFF_ALLMULTI) { filter |= ECORE_ACCEPT_MCAST_UNMATCHED; } ha->filter = filter; rc = qlnx_set_rx_accept_filter(ha, filter); return (rc); } static int qlnx_set_link(qlnx_host_t *ha, bool link_up) { int i, rc = 0; struct ecore_dev *cdev; struct ecore_hwfn *hwfn; struct ecore_ptt *ptt; if (qlnx_vf_device(ha) == 0) return (0); cdev = &ha->cdev; for_each_hwfn(cdev, i) { hwfn = &cdev->hwfns[i]; ptt = ecore_ptt_acquire(hwfn); if (!ptt) return -EBUSY; rc = ecore_mcp_set_link(hwfn, ptt, link_up); ecore_ptt_release(hwfn, ptt); if (rc) return rc; } return (rc); } static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt) { qlnx_host_t *ha; uint64_t count; ha = (qlnx_host_t *)if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_IPACKETS: count = ha->hw_stats.common.rx_ucast_pkts + ha->hw_stats.common.rx_mcast_pkts + ha->hw_stats.common.rx_bcast_pkts; break; case IFCOUNTER_IERRORS: count = ha->hw_stats.common.rx_crc_errors + ha->hw_stats.common.rx_align_errors + ha->hw_stats.common.rx_oversize_packets + ha->hw_stats.common.rx_undersize_packets; break; case IFCOUNTER_OPACKETS: count = ha->hw_stats.common.tx_ucast_pkts + ha->hw_stats.common.tx_mcast_pkts + ha->hw_stats.common.tx_bcast_pkts; break; case IFCOUNTER_OERRORS: count = ha->hw_stats.common.tx_err_drop_pkts; break; case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IBYTES: count = ha->hw_stats.common.rx_ucast_bytes + ha->hw_stats.common.rx_mcast_bytes + ha->hw_stats.common.rx_bcast_bytes; break; case IFCOUNTER_OBYTES: count = ha->hw_stats.common.tx_ucast_bytes + ha->hw_stats.common.tx_mcast_bytes + ha->hw_stats.common.tx_bcast_bytes; break; case IFCOUNTER_IMCASTS: count = ha->hw_stats.common.rx_mcast_bytes; break; case IFCOUNTER_OMCASTS: count = ha->hw_stats.common.tx_mcast_bytes; break; case IFCOUNTER_IQDROPS: case IFCOUNTER_OQDROPS: case IFCOUNTER_NOPROTO: default: return (if_get_counter_default(ifp, cnt)); } return (count); } static void qlnx_timer(void *arg) { qlnx_host_t *ha; ha = (qlnx_host_t *)arg; if (ha->error_recovery) { ha->error_recovery = 0; taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); return; } ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); if (ha->storm_stats_gather) qlnx_sample_storm_stats(ha); callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); return; } static int qlnx_load(qlnx_host_t *ha) { int i; int rc = 0; device_t dev; dev = ha->pci_dev; QL_DPRINT2(ha, "enter\n"); rc = qlnx_alloc_mem_arrays(ha); if (rc) goto qlnx_load_exit0; qlnx_init_fp(ha); rc = qlnx_alloc_mem_load(ha); if (rc) goto qlnx_load_exit1; QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", ha->num_rss, ha->num_tc); for (i = 0; i < ha->num_rss; i++) { if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, qlnx_fp_isr, &ha->irq_vec[i], &ha->irq_vec[i].handle))) { QL_DPRINT1(ha, "could not setup interrupt\n"); goto qlnx_load_exit2; } QL_DPRINT2(ha, "rss_id = %d irq_rid %d \ irq %p handle %p\n", i, ha->irq_vec[i].irq_rid, ha->irq_vec[i].irq, ha->irq_vec[i].handle); bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); } rc = 
qlnx_start_queues(ha); if (rc) goto qlnx_load_exit2; QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n"); /* Add primary mac and set Rx filters */ rc = qlnx_set_rx_mode(ha); if (rc) goto qlnx_load_exit2; /* Ask for link-up using current configuration */ qlnx_set_link(ha, true); if (qlnx_vf_device(ha) == 0) qlnx_link_update(&ha->cdev.hwfns[0]); ha->state = QLNX_STATE_OPEN; bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); if (ha->flags.callout_init) callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); goto qlnx_load_exit0; qlnx_load_exit2: qlnx_free_mem_load(ha); qlnx_load_exit1: ha->num_rss = 0; qlnx_load_exit0: QL_DPRINT2(ha, "exit [%d]\n", rc); return rc; } static void qlnx_drain_soft_lro(qlnx_host_t *ha) { #ifdef QLNX_SOFT_LRO if_t ifp; int i; ifp = ha->ifp; if (if_getcapenable(ifp) & IFCAP_LRO) { for (i = 0; i < ha->num_rss; i++) { struct qlnx_fastpath *fp = &ha->fp_array[i]; struct lro_ctrl *lro; lro = &fp->rxq->lro; tcp_lro_flush_all(lro); } } #endif /* #ifdef QLNX_SOFT_LRO */ return; } static void qlnx_unload(qlnx_host_t *ha) { struct ecore_dev *cdev; device_t dev; int i; cdev = &ha->cdev; dev = ha->pci_dev; QL_DPRINT2(ha, "enter\n"); QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); if (ha->state == QLNX_STATE_OPEN) { qlnx_set_link(ha, false); qlnx_clean_filters(ha); qlnx_stop_queues(ha); ecore_hw_stop_fastpath(cdev); for (i = 0; i < ha->num_rss; i++) { if (ha->irq_vec[i].handle) { (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, ha->irq_vec[i].handle); ha->irq_vec[i].handle = NULL; } } qlnx_drain_fp_taskqueues(ha); qlnx_drain_soft_lro(ha); qlnx_free_mem_load(ha); } if (ha->flags.callout_init) callout_drain(&ha->qlnx_callout); qlnx_mdelay(__func__, 1000); ha->state = QLNX_STATE_CLOSED; QL_DPRINT2(ha, "exit\n"); return; } static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) { int rval = -1; struct ecore_hwfn *p_hwfn; struct ecore_ptt *p_ptt; ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); p_hwfn = &ha->cdev.hwfns[hwfn_index]; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) { QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); return (rval); } rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); if (rval == DBG_STATUS_OK) rval = 0; else { QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" "[0x%x]\n", rval); } ecore_ptt_release(p_hwfn, p_ptt); return (rval); } static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) { int rval = -1; struct ecore_hwfn *p_hwfn; struct ecore_ptt *p_ptt; ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); p_hwfn = &ha->cdev.hwfns[hwfn_index]; p_ptt = ecore_ptt_acquire(p_hwfn); if (!p_ptt) { QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); return (rval); } rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); if (rval == DBG_STATUS_OK) rval = 0; else { QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" " [0x%x]\n", rval); } ecore_ptt_release(p_hwfn, p_ptt); return (rval); } static void qlnx_sample_storm_stats(qlnx_host_t *ha) { int i, index; struct ecore_dev *cdev; qlnx_storm_stats_t *s_stats; uint32_t reg; struct ecore_ptt *p_ptt; struct ecore_hwfn *hwfn; if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { ha->storm_stats_gather = 0; return; } cdev = &ha->cdev; for_each_hwfn(cdev, i) { hwfn = &cdev->hwfns[i]; p_ptt = ecore_ptt_acquire(hwfn); if (!p_ptt) return; index = ha->storm_stats_index + (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); s_stats = &ha->storm_stats[index]; /* XSTORM */ reg = XSEM_REG_FAST_MEMORY + 
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); reg = XSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); reg = XSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); reg = XSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); /* YSTORM */ reg = YSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); reg = YSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); reg = YSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); reg = YSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); /* PSTORM */ reg = PSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); reg = PSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); reg = PSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); reg = PSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); /* TSTORM */ reg = TSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); reg = TSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); reg = TSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); reg = TSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); /* MSTORM */ reg = MSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); reg = MSEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); reg = MSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); reg = MSEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); /* USTORM */ reg = USEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); reg = USEM_REG_FAST_MEMORY + SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); reg = USEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); reg = USEM_REG_FAST_MEMORY + SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); ecore_ptt_release(hwfn, p_ptt); } ha->storm_stats_index++; return; } /* * Name: qlnx_dump_buf8 * Function: dumps a buffer as bytes */ static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) { device_t dev; uint32_t i = 0; uint8_t *buf; dev = ha->pci_dev; buf = dbuf; device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); while (len >= 16) { 
device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x" " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]); i += 16; len -= 16; buf += 16; } switch (len) { case 1: device_printf(dev,"0x%08x: %02x\n", i, buf[0]); break; case 2: device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); break; case 3: device_printf(dev,"0x%08x: %02x %02x %02x\n", i, buf[0], buf[1], buf[2]); break; case 4: device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3]); break; case 5: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4]); break; case 6: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); break; case 7: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); break; case 8: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); break; case 9: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x" " %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8]); break; case 10: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x" " %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9]); break; case 11: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x" " %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10]); break; case 12: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x" " %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11]); break; case 13: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x" " %02x %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); break; case 14: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x" " %02x %02x %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13]); break; case 15: device_printf(dev,"0x%08x:" " %02x %02x %02x %02x %02x %02x %02x %02x" " %02x %02x %02x %02x %02x %02x %02x\n", i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14]); break; default: break; } device_printf(dev, "%s: %s dump end\n", __func__, msg); return; } #ifdef CONFIG_ECORE_SRIOV static void __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) { struct ecore_public_vf_info *vf_info; vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); if (!vf_info) return; /* Clear the VF mac */ memset(vf_info->forced_mac, 0, ETH_ALEN); vf_info->forced_vlan = 0; return; } void qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) { __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); return; } static int __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, struct ecore_filter_ucast *params) { struct ecore_public_vf_info *vf; if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), "VF[%d] vport not initialized\n", vfid); return ECORE_INVAL; } vf = 
ecore_iov_get_public_vf_info(p_hwfn, vfid, true); if (!vf) return -EINVAL; /* No real decision to make; Store the configured MAC */ if (params->type == ECORE_FILTER_MAC || params->type == ECORE_FILTER_MAC_VLAN) memcpy(params->mac, vf->forced_mac, ETH_ALEN); return 0; } int qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) { return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); } static int __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, struct ecore_sp_vport_update_params *params, uint16_t * tlvs) { if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), "VF[%d] vport not initialized\n", vfid); return ECORE_INVAL; } /* Untrusted VFs can't even be trusted to know that fact. * Simply indicate everything is configured fine, and trace * configuration 'behind their back'. */ if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM))) return 0; return 0; } int qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs) { return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs)); } static int qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn) { int i; struct ecore_dev *cdev; cdev = p_hwfn->p_dev; for (i = 0; i < cdev->num_hwfns; i++) { if (&cdev->hwfns[i] == p_hwfn) break; } if (i >= cdev->num_hwfns) return (-1); return (i); } static int __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id) { qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; int i; QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n", ha, p_hwfn->p_dev, p_hwfn, rel_vf_id); if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) return (-1); if (ha->sriov_task[i].pf_taskqueue != NULL) { atomic_testandset_32(&ha->sriov_task[i].flags, QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG); taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, &ha->sriov_task[i].pf_task); } return (ECORE_SUCCESS); } int qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id) { return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id)); } static void __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn) { qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; int i; if (!ha->sriov_initialized) return; QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", ha, p_hwfn->p_dev, p_hwfn); if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) return; if (ha->sriov_task[i].pf_taskqueue != NULL) { atomic_testandset_32(&ha->sriov_task[i].flags, QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE); taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, &ha->sriov_task[i].pf_task); } return; } void qlnx_vf_flr_update(void *p_hwfn) { __qlnx_vf_flr_update(p_hwfn); return; } #ifndef QLNX_VF static void qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn) { qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; int i; QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", ha, p_hwfn->p_dev, p_hwfn); if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) return; QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n", ha, p_hwfn->p_dev, p_hwfn, i); if (ha->sriov_task[i].pf_taskqueue != NULL) { atomic_testandset_32(&ha->sriov_task[i].flags, QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE); taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, &ha->sriov_task[i].pf_task); } } static void qlnx_initialize_sriov(qlnx_host_t *ha) { device_t dev; nvlist_t *pf_schema, *vf_schema; int iov_error; dev = ha->pci_dev; pf_schema = pci_iov_schema_alloc_node(); vf_schema = pci_iov_schema_alloc_node(); pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); pci_iov_schema_add_bool(vf_schema, "allow-set-mac", IOV_SCHEMA_HASDEFAULT, FALSE); pci_iov_schema_add_bool(vf_schema, "allow-promisc", IOV_SCHEMA_HASDEFAULT, 
FALSE); pci_iov_schema_add_uint16(vf_schema, "num-queues", IOV_SCHEMA_HASDEFAULT, 1); iov_error = pci_iov_attach(dev, pf_schema, vf_schema); if (iov_error != 0) { ha->sriov_initialized = 0; } else { device_printf(dev, "SRIOV initialized\n"); ha->sriov_initialized = 1; } return; } static void qlnx_sriov_disable(qlnx_host_t *ha) { struct ecore_dev *cdev; int i, j; cdev = &ha->cdev; ecore_iov_set_vfs_to_disable(cdev, true); for_each_hwfn(cdev, i) { struct ecore_hwfn *hwfn = &cdev->hwfns[i]; struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); if (!ptt) { QL_DPRINT1(ha, "Failed to acquire ptt\n"); return; } /* Clean WFQ db and configure equal weight for all vports */ ecore_clean_wfq_db(hwfn, ptt); ecore_for_each_vf(hwfn, j) { int k = 0; if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) continue; if (ecore_iov_is_vf_started(hwfn, j)) { /* Wait until VF is disabled before releasing */ for (k = 0; k < 100; k++) { if (!ecore_iov_is_vf_stopped(hwfn, j)) { qlnx_mdelay(__func__, 10); } else break; } } if (k < 100) ecore_iov_release_hw_for_vf(&cdev->hwfns[i], ptt, j); else { QL_DPRINT1(ha, "Timeout waiting for VF's FLR to end\n"); } } ecore_ptt_release(hwfn, ptt); } ecore_iov_set_vfs_to_disable(cdev, false); return; } static void qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, struct ecore_iov_vf_init_params *params) { u16 base, i; /* Since we have an equal resource distribution per-VF, and we assume * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting * sequentially from there. */ base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues; params->rel_vf_id = vfid; for (i = 0; i < params->num_queues; i++) { params->req_rx_queue[i] = base + i; params->req_tx_queue[i] = base + i; } /* PF uses indices 0 for itself; Set vport/RSS afterwards */ params->vport_id = vfid + 1; params->rss_eng_id = vfid + 1; return; } static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params) { qlnx_host_t *ha; struct ecore_dev *cdev; struct ecore_iov_vf_init_params params; int ret, j, i; uint32_t max_vfs; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "%s: cannot get softc\n", __func__); return (-1); } if (qlnx_create_pf_taskqueues(ha) != 0) goto qlnx_iov_init_err0; cdev = &ha->cdev; max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT); QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n", dev, num_vfs, max_vfs); if (num_vfs >= max_vfs) { QL_DPRINT1(ha, "Can start at most %d VFs\n", (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1)); goto qlnx_iov_init_err0; } ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF, M_NOWAIT); if (ha->vf_attr == NULL) goto qlnx_iov_init_err0; memset(&params, 0, sizeof(params)); /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { struct ecore_hwfn *hwfn = &cdev->hwfns[j]; struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); /* Make sure not to use more than 16 queues per VF */ params.num_queues = min_t(int, (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs), 16); if (!ptt) { QL_DPRINT1(ha, "Failed to acquire ptt\n"); goto qlnx_iov_init_err1; } for (i = 0; i < num_vfs; i++) { if (!ecore_iov_is_valid_vfid(hwfn, i, false, true)) continue; qlnx_sriov_enable_qid_config(hwfn, i, &params); ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params); if (ret) { QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i); ecore_ptt_release(hwfn, ptt); goto qlnx_iov_init_err1; } } ecore_ptt_release(hwfn, ptt); } ha->num_vfs = num_vfs; qlnx_inform_vf_link_state(&cdev->hwfns[0], ha); QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
return (0); qlnx_iov_init_err1: qlnx_sriov_disable(ha); qlnx_iov_init_err0: qlnx_destroy_pf_taskqueues(ha); ha->num_vfs = 0; return (-1); } static void qlnx_iov_uninit(device_t dev) { qlnx_host_t *ha; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "%s: cannot get softc\n", __func__); return; } QL_DPRINT2(ha," dev = %p enter\n", dev); qlnx_sriov_disable(ha); qlnx_destroy_pf_taskqueues(ha); free(ha->vf_attr, M_QLNXBUF); ha->vf_attr = NULL; ha->num_vfs = 0; QL_DPRINT2(ha," dev = %p exit\n", dev); return; } static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { qlnx_host_t *ha; qlnx_vf_attr_t *vf_attr; unsigned const char *mac; size_t size; struct ecore_hwfn *p_hwfn; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "%s: cannot get softc\n", __func__); return (-1); } QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); if (vfnum > (ha->num_vfs - 1)) { QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", vfnum, (ha->num_vfs - 1)); } vf_attr = &ha->vf_attr[vfnum]; if (nvlist_exists_binary(params, "mac-addr")) { mac = nvlist_get_binary(params, "mac-addr", &size); bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); device_printf(dev, "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, vf_attr->mac_addr[0], vf_attr->mac_addr[1], vf_attr->mac_addr[2], vf_attr->mac_addr[3], vf_attr->mac_addr[4], vf_attr->mac_addr[5]); p_hwfn = &ha->cdev.hwfns[0]; ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, vfnum); } QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); return (0); } static void qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) { uint64_t events[ECORE_VF_ARRAY_LENGTH]; struct ecore_ptt *ptt; int i; ptt = ecore_ptt_acquire(p_hwfn); if (!ptt) { QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); __qlnx_pf_vf_msg(p_hwfn, 0); return; } ecore_iov_pf_get_pending_events(p_hwfn, events); QL_DPRINT2(ha, "Event mask of VF events:" "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n", events[0], events[1], events[2]); ecore_for_each_vf(p_hwfn, i) { /* Skip VFs with no pending messages */ if (!(events[i / 64] & (1ULL << (i % 64)))) continue; QL_DPRINT2(ha, "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); /* Copy VF's message to PF's request buffer for that VF */ if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) continue; ecore_iov_process_mbx_req(p_hwfn, ptt, i); } ecore_ptt_release(p_hwfn, ptt); return; } static void qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) { struct ecore_ptt *ptt; int ret; ptt = ecore_ptt_acquire(p_hwfn); if (!ptt) { QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); __qlnx_vf_flr_update(p_hwfn); return; } ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); if (ret) { QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); } ecore_ptt_release(p_hwfn, ptt); return; } static void qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) { struct ecore_ptt *ptt; int i; ptt = ecore_ptt_acquire(p_hwfn); if (!ptt) { QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); qlnx_vf_bulleting_update(p_hwfn); return; } ecore_for_each_vf(p_hwfn, i) { QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", p_hwfn, i); ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); } ecore_ptt_release(p_hwfn, ptt); return; } static void qlnx_pf_taskqueue(void *context, int pending) { struct ecore_hwfn *p_hwfn; qlnx_host_t *ha; int i; p_hwfn = context; if (p_hwfn == NULL) return; ha = (qlnx_host_t *)(p_hwfn->p_dev); if ((i = 
qlnx_find_hwfn_index(p_hwfn)) == -1) return; if (atomic_testandclear_32(&ha->sriov_task[i].flags, QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG)) qlnx_handle_vf_msg(ha, p_hwfn); if (atomic_testandclear_32(&ha->sriov_task[i].flags, QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE)) qlnx_handle_vf_flr_update(ha, p_hwfn); if (atomic_testandclear_32(&ha->sriov_task[i].flags, QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE)) qlnx_handle_bulletin_update(ha, p_hwfn); return; } static int qlnx_create_pf_taskqueues(qlnx_host_t *ha) { int i; uint8_t tq_name[32]; for (i = 0; i < ha->cdev.num_hwfns; i++) { struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; bzero(tq_name, sizeof (tq_name)); snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i); TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn); ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT, taskqueue_thread_enqueue, &ha->sriov_task[i].pf_taskqueue); if (ha->sriov_task[i].pf_taskqueue == NULL) return (-1); taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1, PI_NET, "%s", tq_name); QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue); } return (0); } static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha) { int i; for (i = 0; i < ha->cdev.num_hwfns; i++) { if (ha->sriov_task[i].pf_taskqueue != NULL) { taskqueue_drain(ha->sriov_task[i].pf_taskqueue, &ha->sriov_task[i].pf_task); taskqueue_free(ha->sriov_task[i].pf_taskqueue); ha->sriov_task[i].pf_taskqueue = NULL; } } return; } static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha) { struct ecore_mcp_link_capabilities caps; struct ecore_mcp_link_params params; struct ecore_mcp_link_state link; int i; if (!p_hwfn->pf_iov_info) return; memset(&params, 0, sizeof(struct ecore_mcp_link_params)); memset(&link, 0, sizeof(struct ecore_mcp_link_state)); memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities)); memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link)); memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params)); QL_DPRINT2(ha, "called\n"); /* Update bulletin of all future possible VFs with link configuration */ for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { /* Modify link according to the VF's configured link state */ link.link_up = false; if (ha->link_up) { link.link_up = true; /* Set speed according to maximum supported by HW. * that is 40G for regular devices and 100G for CMT * mode devices. */ link.speed = (p_hwfn->p_dev->num_hwfns > 1) ? 100000 : link.speed; } QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up); ecore_iov_set_link(p_hwfn, i, &params, &link, &caps); } qlnx_vf_bulleting_update(p_hwfn); return; } #endif /* #ifndef QLNX_VF */ #endif /* #ifdef CONFIG_ECORE_SRIOV */ diff --git a/sys/dev/qlnx/qlnxe/qlnx_os.h b/sys/dev/qlnx/qlnxe/qlnx_os.h index 261283fb6eaf..6d717d0e70bf 100644 --- a/sys/dev/qlnx/qlnxe/qlnx_os.h +++ b/sys/dev/qlnx/qlnxe/qlnx_os.h @@ -1,149 +1,149 @@ /* * Copyright (c) 2017-2018 Cavium, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ /* * File: qlnx_os.h * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. */ #ifndef _QLNX_OS_H_ #define _QLNX_OS_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static __inline int qlnx_ms_to_hz(int ms) { int qlnx_hz; struct timeval t; t.tv_sec = ms / 1000; t.tv_usec = (ms % 1000) * 1000; qlnx_hz = tvtohz(&t); if (qlnx_hz < 0) qlnx_hz = 0x7fffffff; if (!qlnx_hz) qlnx_hz = 1; return (qlnx_hz); } static __inline int qlnx_sec_to_hz(int sec) { struct timeval t; t.tv_sec = sec; t.tv_usec = 0; return (tvtohz(&t)); } MALLOC_DECLARE(M_QLNXBUF); #define qlnx_mdelay(fn, msecs) \ {\ if (cold) \ DELAY((msecs * 1000)); \ else \ pause(fn, qlnx_ms_to_hz(msecs)); \ } /* * Locks */ -#define QLNX_LOCK(ha) mtx_lock(&ha->hw_lock) -#define QLNX_UNLOCK(ha) mtx_unlock(&ha->hw_lock) +#define QLNX_LOCK(ha) sx_xlock(&ha->hw_lock) +#define QLNX_UNLOCK(ha) sx_xunlock(&ha->hw_lock) /* * structure encapsulating a DMA buffer */ struct qlnx_dma { bus_size_t alignment; uint32_t size; void *dma_b; bus_addr_t dma_addr; bus_dmamap_t dma_map; bus_dma_tag_t dma_tag; }; typedef struct qlnx_dma qlnx_dma_t; #endif /* #ifndef _QLNX_OS_H_ */
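The QLNX_LOCK()/QLNX_UNLOCK() hunk above moves the hw_lock from mtx_lock()/mtx_unlock() to sx_xlock()/sx_xunlock(), which implies ha->hw_lock is now declared as a struct sx rather than a struct mtx (that declaration sits outside this hunk) and, presumably, that the lock may now be held across sleeping operations such as the pause() in qlnx_mdelay(). A minimal sketch of the matching initialization and a locked path, assuming the hypothetical helper names qlnx_hw_lock_init() and qlnx_hw_config_locked() and the qlnx_host_t softc used elsewhere in the driver:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

/*
 * Illustration only; assumes ha->hw_lock is declared as a struct sx in the
 * softc (qlnx_host_t). The lock name string is a placeholder.
 */
static void
qlnx_hw_lock_init(qlnx_host_t *ha)
{
	sx_init(&ha->hw_lock, "qlnx_hw_lock");
}

static void
qlnx_hw_config_locked(qlnx_host_t *ha)
{
	QLNX_LOCK(ha);			/* sx_xlock(&ha->hw_lock) */
	/* An sx lock, unlike an MTX_DEF mutex, may be held while sleeping. */
	qlnx_mdelay(__func__, 10);	/* pause() when !cold */
	QLNX_UNLOCK(ha);		/* sx_xunlock(&ha->hw_lock) */
}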