Index: stable/11/sys/dev/oce/oce_hw.c =================================================================== --- stable/11/sys/dev/oce/oce_hw.c (revision 338937) +++ stable/11/sys/dev/oce/oce_hw.c (revision 338938) @@ -1,590 +1,595 @@ /*- * Copyright (C) 2013 Emulex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include "oce_if.h" static int oce_POST(POCE_SOFTC sc); /** * @brief Function to post status * @param sc software handle to the device */ static int oce_POST(POCE_SOFTC sc) { mpu_ep_semaphore_t post_status; int tmo = 60000; /* read semaphore CSR */ post_status.dw0 = OCE_READ_CSR_MPU(sc, csr, MPU_EP_SEMAPHORE(sc)); /* if host is ready then wait for fw ready else send POST */ if (post_status.bits.stage <= POST_STAGE_AWAITING_HOST_RDY) { post_status.bits.stage = POST_STAGE_CHIP_RESET; OCE_WRITE_CSR_MPU(sc, csr, MPU_EP_SEMAPHORE(sc), post_status.dw0); } /* wait for FW ready */ for (;;) { if (--tmo == 0) break; DELAY(1000); post_status.dw0 = OCE_READ_CSR_MPU(sc, csr, MPU_EP_SEMAPHORE(sc)); if (post_status.bits.error) { device_printf(sc->dev, "POST failed: %x\n", post_status.dw0); return ENXIO; } if (post_status.bits.stage == POST_STAGE_ARMFW_READY) return 0; } device_printf(sc->dev, "POST timed out: %x\n", post_status.dw0); return ENXIO; } /** * @brief Function for hardware initialization * @param sc software handle to the device */ int oce_hw_init(POCE_SOFTC sc) { int rc = 0; rc = oce_POST(sc); if (rc) return rc; /* create the bootstrap mailbox */ rc = oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->bsmbx, 0); if (rc) { device_printf(sc->dev, "Mailbox alloc failed\n"); return rc; } rc = oce_reset_fun(sc); if (rc) goto error; rc = oce_mbox_init(sc); if (rc) goto error; rc = oce_get_fw_version(sc); if (rc) goto error; rc = oce_get_fw_config(sc); if (rc) goto error; sc->macaddr.size_of_struct = 6; rc = oce_read_mac_addr(sc, 0, 1, MAC_ADDRESS_TYPE_NETWORK, &sc->macaddr); if (rc) goto error; if ((IS_BE(sc) && (sc->flags & OCE_FLAGS_BE3)) || 
IS_SH(sc)) { rc = oce_mbox_check_native_mode(sc); if (rc) goto error; } else sc->be3_native = 0; return rc; error: oce_dma_free(sc, &sc->bsmbx); device_printf(sc->dev, "Hardware initialisation failed\n"); return rc; } /** * @brief Releases the obtained pci resources * @param sc software handle to the device */ void oce_hw_pci_free(POCE_SOFTC sc) { int pci_cfg_barnum = 0; if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2)) pci_cfg_barnum = OCE_DEV_BE2_CFG_BAR; else pci_cfg_barnum = OCE_DEV_CFG_BAR; if (sc->devcfg_res != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(pci_cfg_barnum), sc->devcfg_res); sc->devcfg_res = (struct resource *)NULL; sc->devcfg_btag = (bus_space_tag_t) 0; sc->devcfg_bhandle = (bus_space_handle_t)0; sc->devcfg_vhandle = (void *)NULL; } if (sc->csr_res != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(OCE_PCI_CSR_BAR), sc->csr_res); sc->csr_res = (struct resource *)NULL; sc->csr_btag = (bus_space_tag_t)0; sc->csr_bhandle = (bus_space_handle_t)0; sc->csr_vhandle = (void *)NULL; } if (sc->db_res != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(OCE_PCI_DB_BAR), sc->db_res); sc->db_res = (struct resource *)NULL; sc->db_btag = (bus_space_tag_t)0; sc->db_bhandle = (bus_space_handle_t)0; sc->db_vhandle = (void *)NULL; } } /** * @brief Function to get the PCI capabilities * @param sc software handle to the device */ static void oce_get_pci_capabilities(POCE_SOFTC sc) { uint32_t val; #if __FreeBSD_version >= 1000000 #define pci_find_extcap pci_find_cap #endif if (pci_find_extcap(sc->dev, PCIY_PCIX, &val) == 0) { if (val != 0) sc->flags |= OCE_FLAGS_PCIX; } if (pci_find_extcap(sc->dev, PCIY_EXPRESS, &val) == 0) { if (val != 0) { uint16_t link_status = pci_read_config(sc->dev, val + 0x12, 2); sc->flags |= OCE_FLAGS_PCIE; sc->pcie_link_speed = link_status & 0xf; sc->pcie_link_width = (link_status >> 4) & 0x3f; } } if (pci_find_extcap(sc->dev, PCIY_MSI, &val) == 0) { if (val != 0) sc->flags |= OCE_FLAGS_MSI_CAPABLE; } if (pci_find_extcap(sc->dev, PCIY_MSIX, &val) == 0) { if (val != 0) { val = pci_msix_count(sc->dev); sc->flags |= OCE_FLAGS_MSIX_CAPABLE; } } } /** * @brief Allocate PCI resources. 
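* Maps the device config BAR (OCE_DEV_BE2_CFG_BAR on BE2, OCE_DEV_CFG_BAR otherwise), validates the SLI_INTF register, and on BE3/Skyhawk also maps the CSR and doorbell BARs.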
* * @param sc software handle to the device * @returns 0 if successful, or error */ int oce_hw_pci_alloc(POCE_SOFTC sc) { int rr, pci_cfg_barnum = 0; pci_sli_intf_t intf; pci_enable_busmaster(sc->dev); oce_get_pci_capabilities(sc); sc->fn = pci_get_function(sc->dev); /* setup the device config region */ if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2)) pci_cfg_barnum = OCE_DEV_BE2_CFG_BAR; else pci_cfg_barnum = OCE_DEV_CFG_BAR; rr = PCIR_BAR(pci_cfg_barnum); if (IS_BE(sc) || IS_SH(sc)) sc->devcfg_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rr, RF_ACTIVE|RF_SHAREABLE); else sc->devcfg_res = bus_alloc_resource_anywhere(sc->dev, SYS_RES_MEMORY, &rr, 32768, RF_ACTIVE|RF_SHAREABLE); if (!sc->devcfg_res) goto error; sc->devcfg_btag = rman_get_bustag(sc->devcfg_res); sc->devcfg_bhandle = rman_get_bushandle(sc->devcfg_res); sc->devcfg_vhandle = rman_get_virtual(sc->devcfg_res); /* Read the SLI_INTF register and determine whether we * can use this port and its features */ intf.dw0 = pci_read_config((sc)->dev, OCE_INTF_REG_OFFSET, 4); if (intf.bits.sli_valid != OCE_INTF_VALID_SIG) goto error; if (intf.bits.sli_rev != OCE_INTF_SLI_REV4) { device_printf(sc->dev, "Adapter doesn't support SLI4\n"); goto error; } if (intf.bits.sli_if_type == OCE_INTF_IF_TYPE_1) sc->flags |= OCE_FLAGS_MBOX_ENDIAN_RQD; if (intf.bits.sli_hint1 == OCE_INTF_FUNC_RESET_REQD) sc->flags |= OCE_FLAGS_FUNCRESET_RQD; if (intf.bits.sli_func_type == OCE_INTF_VIRT_FUNC) sc->flags |= OCE_FLAGS_VIRTUAL_PORT; /* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */ if (IS_BE(sc) || IS_SH(sc)) { /* set up CSR region */ rr = PCIR_BAR(OCE_PCI_CSR_BAR); sc->csr_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rr, RF_ACTIVE|RF_SHAREABLE); if (!sc->csr_res) goto error; sc->csr_btag = rman_get_bustag(sc->csr_res); sc->csr_bhandle = rman_get_bushandle(sc->csr_res); sc->csr_vhandle = rman_get_virtual(sc->csr_res); /* set up DB doorbell region */ rr = PCIR_BAR(OCE_PCI_DB_BAR); sc->db_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rr, RF_ACTIVE|RF_SHAREABLE); if (!sc->db_res) goto error; sc->db_btag = rman_get_bustag(sc->db_res); sc->db_bhandle = rman_get_bushandle(sc->db_res); sc->db_vhandle = rman_get_virtual(sc->db_res); } return 0; error: oce_hw_pci_free(sc); return ENXIO; } /** * @brief Function for device shutdown * @param sc software handle to the device */ void oce_hw_shutdown(POCE_SOFTC sc) { oce_stats_free(sc); /* disable hardware interrupts */ oce_hw_intr_disable(sc); #if defined(INET6) || defined(INET) /* Free LRO resources */ oce_free_lro(sc); #endif /* Release all queues */ oce_queue_release_all(sc); /* Delete network interface */ oce_delete_nw_interface(sc); /* After fw clean we don't send any cmds to fw. */ oce_fw_clean(sc); /* release intr resources */ oce_intr_free(sc); /* release PCI resources */ oce_hw_pci_free(sc); /* free mbox specific resources */ LOCK_DESTROY(&sc->bmbx_lock); LOCK_DESTROY(&sc->dev_lock); oce_dma_free(sc, &sc->bsmbx); } /** * @brief Function for creating nw interface.
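* Builds the interface capability masks (RSS, and hardware LRO when sc->enable_hwlro is set), issues OPCODE_COMMON_CREATE_IFACE via oce_if_create(), then programs default flow control and the promiscuous setting.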
* @param sc software handle to the device * @returns 0 on success, error otherwise */ int oce_create_nw_interface(POCE_SOFTC sc) { int rc; uint32_t capab_flags; uint32_t capab_en_flags; /* interface capabilities to give device when creating interface */ capab_flags = OCE_CAPAB_FLAGS; /* capabilities to enable by default (others set dynamically) */ capab_en_flags = OCE_CAPAB_ENABLE; if (IS_XE201(sc)) { /* LANCER A0 workaround */ capab_en_flags &= ~MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR; capab_flags &= ~MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR; } if (IS_SH(sc) || IS_XE201(sc)) capab_flags |= MBX_RX_IFACE_FLAGS_MULTICAST; + if (sc->enable_hwlro) { + capab_flags |= MBX_RX_IFACE_FLAGS_LRO; + capab_en_flags |= MBX_RX_IFACE_FLAGS_LRO; + } + /* enable capabilities controlled via driver startup parameters */ if (is_rss_enabled(sc)) capab_en_flags |= MBX_RX_IFACE_FLAGS_RSS; else { capab_en_flags &= ~MBX_RX_IFACE_FLAGS_RSS; capab_flags &= ~MBX_RX_IFACE_FLAGS_RSS; } rc = oce_if_create(sc, capab_flags, capab_en_flags, 0, &sc->macaddr.mac_addr[0], &sc->if_id); if (rc) return rc; atomic_inc_32(&sc->nifs); sc->if_cap_flags = capab_en_flags; /* set default flow control */ rc = oce_set_flow_control(sc, sc->flow_control); if (rc) goto error; rc = oce_rxf_set_promiscuous(sc, sc->promisc); if (rc) goto error; return rc; error: oce_delete_nw_interface(sc); return rc; } /** * @brief Function to delete a nw interface. * @param sc software handle to the device */ void oce_delete_nw_interface(POCE_SOFTC sc) { /* currently only a single interface is implemented */ if (sc->nifs > 0) { oce_if_del(sc, sc->if_id); atomic_dec_32(&sc->nifs); } } /** * @brief Soft reset. * @param sc software handle to the device * @returns 0 on success, error otherwise */ int oce_pci_soft_reset(POCE_SOFTC sc) { int rc; mpu_ep_control_t ctrl; ctrl.dw0 = OCE_READ_CSR_MPU(sc, csr, MPU_EP_CONTROL); ctrl.bits.cpu_reset = 1; OCE_WRITE_CSR_MPU(sc, csr, MPU_EP_CONTROL, ctrl.dw0); DELAY(50); rc = oce_POST(sc); return rc; } /** * @brief Function for hardware start * @param sc software handle to the device * @returns 0 on success, error otherwise */ int oce_hw_start(POCE_SOFTC sc) { struct link_status link = { 0 }; int rc = 0; rc = oce_get_link_status(sc, &link); if (rc) return 1; if (link.logical_link_status == NTWK_LOGICAL_LINK_UP) { sc->link_status = NTWK_LOGICAL_LINK_UP; if_link_state_change(sc->ifp, LINK_STATE_UP); } else { sc->link_status = NTWK_LOGICAL_LINK_DOWN; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } sc->link_speed = link.phys_port_speed; sc->qos_link_speed = (uint32_t)link.qos_link_speed * 10; rc = oce_start_mq(sc->mq); /* We need MCC async events, so enable interrupts and arm the first EQ; other EQs are armed after the interface is up */ oce_hw_intr_enable(sc); oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE); /* Send the first MCC cmd; after that the FW sends us MCC notifications unprompted */ oce_first_mcc_cmd(sc); return rc; } /** * @brief Function for hardware enable interrupts.
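* (Sets HOSTINTR_MASK, bit 29 of the PCICFG_INTR_CTRL register in the device config BAR.)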
* @param sc software handle to the device */ void oce_hw_intr_enable(POCE_SOFTC sc) { uint32_t reg; reg = OCE_READ_REG32(sc, devcfg, PCICFG_INTR_CTRL); reg |= HOSTINTR_MASK; OCE_WRITE_REG32(sc, devcfg, PCICFG_INTR_CTRL, reg); } /** * @brief Function for hardware disable interrupts * @param sc software handle to the device */ void oce_hw_intr_disable(POCE_SOFTC sc) { uint32_t reg; reg = OCE_READ_REG32(sc, devcfg, PCICFG_INTR_CTRL); reg &= ~HOSTINTR_MASK; OCE_WRITE_REG32(sc, devcfg, PCICFG_INTR_CTRL, reg); } /** * @brief Function for hardware update multicast filter * @param sc software handle to the device */ int oce_hw_update_multicast(POCE_SOFTC sc) { struct ifnet *ifp = sc->ifp; struct ifmultiaddr *ifma; struct mbx_set_common_iface_multicast *req = NULL; OCE_DMA_MEM dma; int rc = 0; /* Allocate DMA mem */ if (oce_dma_alloc(sc, sizeof(struct mbx_set_common_iface_multicast), &dma, 0)) return ENOMEM; req = OCE_DMAPTR(&dma, struct mbx_set_common_iface_multicast); bzero(req, sizeof(struct mbx_set_common_iface_multicast)); #if __FreeBSD_version > 800000 if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (req->params.req.num_mac == OCE_MAX_MC_FILTER_SIZE) { /* More multicast addresses than our hardware table can hold, so enable multicast promiscuous mode in hardware to accept all multicast packets */ req->params.req.promiscuous = 1; break; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &req->params.req.mac[req->params.req.num_mac], ETH_ADDR_LEN); req->params.req.num_mac = req->params.req.num_mac + 1; } #if __FreeBSD_version > 800000 if_maddr_runlock(ifp); #endif req->params.req.if_id = sc->if_id; rc = oce_update_multicast(sc, &dma); oce_dma_free(sc, &dma); return rc; } Index: stable/11/sys/dev/oce/oce_hw.h =================================================================== --- stable/11/sys/dev/oce/oce_hw.h (revision 338937) +++ stable/11/sys/dev/oce/oce_hw.h (revision 338938) @@ -1,3730 +1,4197 @@ /*- * Copyright (C) 2013 Emulex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.
* * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include <sys/types.h> #undef _BIG_ENDIAN /* TODO */ #pragma pack(1) #define OC_CNA_GEN2 0x2 #define OC_CNA_GEN3 0x3 #define DEVID_TIGERSHARK 0x700 #define DEVID_TOMCAT 0x710 /* PCI CSR offsets */ #define PCICFG_F1_CSR 0x0 /* F1 for NIC */ #define PCICFG_SEMAPHORE 0xbc #define PCICFG_SOFT_RESET 0x5c #define PCICFG_UE_STATUS_HI_MASK 0xac #define PCICFG_UE_STATUS_LO_MASK 0xa8 #define PCICFG_ONLINE0 0xb0 #define PCICFG_ONLINE1 0xb4 #define INTR_EN 0x20000000 #define IMAGE_TRANSFER_SIZE (32 * 1024) /* 32K at a time */ /********* UE Status and Mask Registers ***/ #define PCICFG_UE_STATUS_LOW 0xA0 #define PCICFG_UE_STATUS_HIGH 0xA4 #define PCICFG_UE_STATUS_LOW_MASK 0xA8 /* Lancer SLIPORT registers */ #define SLIPORT_STATUS_OFFSET 0x404 #define SLIPORT_CONTROL_OFFSET 0x408 #define SLIPORT_ERROR1_OFFSET 0x40C #define SLIPORT_ERROR2_OFFSET 0x410 #define PHYSDEV_CONTROL_OFFSET 0x414 #define SLIPORT_STATUS_ERR_MASK 0x80000000 #define SLIPORT_STATUS_DIP_MASK 0x02000000 #define SLIPORT_STATUS_RN_MASK 0x01000000 #define SLIPORT_STATUS_RDY_MASK 0x00800000 #define SLI_PORT_CONTROL_IP_MASK 0x08000000 #define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002 #define PHYSDEV_CONTROL_DD_MASK 0x00000004 #define PHYSDEV_CONTROL_INP_MASK 0x40000000 #define SLIPORT_ERROR_NO_RESOURCE1 0x2 #define SLIPORT_ERROR_NO_RESOURCE2 0x9 /* CSR register offsets */ #define MPU_EP_CONTROL 0 #define MPU_EP_SEMAPHORE_BE3 0xac #define MPU_EP_SEMAPHORE_XE201 0x400 #define MPU_EP_SEMAPHORE_SH 0x94 #define PCICFG_INTR_CTRL 0xfc #define HOSTINTR_MASK (1 << 29) #define HOSTINTR_PFUNC_SHIFT 26 #define HOSTINTR_PFUNC_MASK 7 /* POST status reg struct */ #define POST_STAGE_POWER_ON_RESET 0x00 #define POST_STAGE_AWAITING_HOST_RDY 0x01 #define POST_STAGE_HOST_RDY 0x02 #define POST_STAGE_CHIP_RESET 0x03 #define POST_STAGE_ARMFW_READY 0xc000 #define POST_STAGE_ARMFW_UE 0xf000 /* DOORBELL registers */ #define PD_RXULP_DB 0x0100 #define PD_TXULP_DB 0x0060 #define DB_RQ_ID_MASK 0x3FF #define PD_CQ_DB 0x0120 #define PD_EQ_DB PD_CQ_DB #define PD_MPU_MBOX_DB 0x0160 #define PD_MQ_DB 0x0140 +#define DB_OFFSET 0xc0 +#define DB_LRO_RQ_ID_MASK 0x7FF + /* EQE completion types */ #define EQ_MINOR_CODE_COMPLETION 0x00 #define EQ_MINOR_CODE_OTHER 0x01 #define EQ_MAJOR_CODE_COMPLETION 0x00 /* Link Status field values */ #define PHY_LINK_FAULT_NONE 0x0 #define PHY_LINK_FAULT_LOCAL 0x01 #define PHY_LINK_FAULT_REMOTE 0x02 #define PHY_LINK_SPEED_ZERO 0x0 /* No link */ #define PHY_LINK_SPEED_10MBPS 0x1 /* (10 Mbps) */ #define PHY_LINK_SPEED_100MBPS 0x2 /* (100 Mbps) */ #define PHY_LINK_SPEED_1GBPS 0x3 /* (1 Gbps) */ #define PHY_LINK_SPEED_10GBPS 0x4 /* (10 Gbps) */ #define PHY_LINK_DUPLEX_NONE 0x0 #define PHY_LINK_DUPLEX_HALF 0x1 #define PHY_LINK_DUPLEX_FULL 0x2 #define NTWK_PORT_A 0x0 /* (Port A) */ #define NTWK_PORT_B 0x1 /* (Port B) */ #define PHY_LINK_SPEED_ZERO 0x0 /* (No link.)
*/ #define PHY_LINK_SPEED_10MBPS 0x1 /* (10 Mbps) */ #define PHY_LINK_SPEED_100MBPS 0x2 /* (100 Mbps) */ #define PHY_LINK_SPEED_1GBPS 0x3 /* (1 Gbps) */ #define PHY_LINK_SPEED_10GBPS 0x4 /* (10 Gbps) */ /* Hardware Address types */ #define MAC_ADDRESS_TYPE_STORAGE 0x0 /* (Storage MAC Address) */ #define MAC_ADDRESS_TYPE_NETWORK 0x1 /* (Network MAC Address) */ #define MAC_ADDRESS_TYPE_PD 0x2 /* (Protection Domain MAC Addr) */ #define MAC_ADDRESS_TYPE_MANAGEMENT 0x3 /* (Management MAC Address) */ #define MAC_ADDRESS_TYPE_FCOE 0x4 /* (FCoE MAC Address) */ /* CREATE_IFACE capability and cap_en flags */ #define MBX_RX_IFACE_FLAGS_RSS 0x4 #define MBX_RX_IFACE_FLAGS_PROMISCUOUS 0x8 #define MBX_RX_IFACE_FLAGS_BROADCAST 0x10 #define MBX_RX_IFACE_FLAGS_UNTAGGED 0x20 #define MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS 0x80 #define MBX_RX_IFACE_FLAGS_VLAN 0x100 #define MBX_RX_IFACE_FLAGS_MCAST_PROMISCUOUS 0x200 #define MBX_RX_IFACE_FLAGS_PASS_L2_ERR 0x400 #define MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR 0x800 #define MBX_RX_IFACE_FLAGS_MULTICAST 0x1000 #define MBX_RX_IFACE_RX_FILTER_IF_MULTICAST_HASH 0x2000 #define MBX_RX_IFACE_FLAGS_HDS 0x4000 #define MBX_RX_IFACE_FLAGS_DIRECTED 0x8000 #define MBX_RX_IFACE_FLAGS_VMQ 0x10000 #define MBX_RX_IFACE_FLAGS_NETQ 0x20000 #define MBX_RX_IFACE_FLAGS_QGROUPS 0x40000 #define MBX_RX_IFACE_FLAGS_LSO 0x80000 #define MBX_RX_IFACE_FLAGS_LRO 0x100000 #define MQ_RING_CONTEXT_SIZE_16 0x5 /* (16 entries) */ #define MQ_RING_CONTEXT_SIZE_32 0x6 /* (32 entries) */ #define MQ_RING_CONTEXT_SIZE_64 0x7 /* (64 entries) */ #define MQ_RING_CONTEXT_SIZE_128 0x8 /* (128 entries) */ #define MBX_DB_READY_BIT 0x1 #define MBX_DB_HI_BIT 0x2 #define ASYNC_EVENT_CODE_LINK_STATE 0x1 #define ASYNC_EVENT_LINK_UP 0x1 #define ASYNC_EVENT_LINK_DOWN 0x0 #define ASYNC_EVENT_GRP5 0x5 #define ASYNC_EVENT_CODE_DEBUG 0x6 #define ASYNC_EVENT_PVID_STATE 0x3 +#define ASYNC_EVENT_OS2BMC 0x5 #define ASYNC_EVENT_DEBUG_QNQ 0x1 #define ASYNC_EVENT_CODE_SLIPORT 0x11 #define VLAN_VID_MASK 0x0FFF /* port link_status */ #define ASYNC_EVENT_LOGICAL 0x02 /* Logical Link Status */ #define NTWK_LOGICAL_LINK_DOWN 0 #define NTWK_LOGICAL_LINK_UP 1 /* Rx filter bits */ #define NTWK_RX_FILTER_IP_CKSUM 0x1 #define NTWK_RX_FILTER_TCP_CKSUM 0x2 #define NTWK_RX_FILTER_UDP_CKSUM 0x4 #define NTWK_RX_FILTER_STRIP_CRC 0x8 /* max SGE per mbx */ #define MAX_MBX_SGE 19 /* Max multicast filter size*/ #define OCE_MAX_MC_FILTER_SIZE 64 /* PCI SLI (Service Level Interface) capabilities register */ #define OCE_INTF_REG_OFFSET 0x58 #define OCE_INTF_VALID_SIG 6 /* register's signature */ #define OCE_INTF_FUNC_RESET_REQD 1 #define OCE_INTF_HINT1_NOHINT 0 #define OCE_INTF_HINT1_SEMAINIT 1 #define OCE_INTF_HINT1_STATCTRL 2 #define OCE_INTF_IF_TYPE_0 0 #define OCE_INTF_IF_TYPE_1 1 #define OCE_INTF_IF_TYPE_2 2 #define OCE_INTF_IF_TYPE_3 3 #define OCE_INTF_SLI_REV3 3 /* not supported by driver */ #define OCE_INTF_SLI_REV4 4 /* driver supports SLI-4 */ #define OCE_INTF_PHYS_FUNC 0 #define OCE_INTF_VIRT_FUNC 1 #define OCE_INTF_FAMILY_BE2 0 /* not supported by driver */ #define OCE_INTF_FAMILY_BE3 1 /* driver supports BE3 */ #define OCE_INTF_FAMILY_A0_CHIP 0xA /* Lancer A0 chip (supported) */ #define OCE_INTF_FAMILY_B0_CHIP 0xB /* Lancer B0 chip (future) */ #define NIC_WQE_SIZE 16 #define NIC_UNICAST 0x00 #define NIC_MULTICAST 0x01 #define NIC_BROADCAST 0x02 #define NIC_HDS_NO_SPLIT 0x00 #define NIC_HDS_SPLIT_L3PL 0x01 #define NIC_HDS_SPLIT_L4PL 0x02 #define NIC_WQ_TYPE_FORWARDING 0x01 #define NIC_WQ_TYPE_STANDARD 0x02 #define NIC_WQ_TYPE_LOW_LATENCY 0x04 #define 
OCE_RESET_STATS 1 #define OCE_RETAIN_STATS 0 #define OCE_TXP_SW_SZ 48 typedef union pci_sli_intf_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t sli_valid:3; uint32_t sli_hint2:5; uint32_t sli_hint1:8; uint32_t sli_if_type:4; uint32_t sli_family:4; uint32_t sli_rev:4; uint32_t rsv0:3; uint32_t sli_func_type:1; #else uint32_t sli_func_type:1; uint32_t rsv0:3; uint32_t sli_rev:4; uint32_t sli_family:4; uint32_t sli_if_type:4; uint32_t sli_hint1:8; uint32_t sli_hint2:5; uint32_t sli_valid:3; #endif } bits; } pci_sli_intf_t; /* physical address structure to be used in MBX */ struct phys_addr { /* dw0 */ uint32_t lo; /* dw1 */ uint32_t hi; }; typedef union pcicfg_intr_ctl_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t winselect:2; uint32_t hostintr:1; uint32_t pfnum:3; uint32_t vf_cev_int_line_en:1; uint32_t winaddr:23; uint32_t membarwinen:1; #else uint32_t membarwinen:1; uint32_t winaddr:23; uint32_t vf_cev_int_line_en:1; uint32_t pfnum:3; uint32_t hostintr:1; uint32_t winselect:2; #endif } bits; } pcicfg_intr_ctl_t; typedef union pcicfg_semaphore_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t rsvd:31; uint32_t lock:1; #else uint32_t lock:1; uint32_t rsvd:31; #endif } bits; } pcicfg_semaphore_t; typedef union pcicfg_soft_reset_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t nec_ll_rcvdetect:8; uint32_t dbg_all_reqs_62_49:14; uint32_t scratchpad0:1; uint32_t exception_oe:1; uint32_t soft_reset:1; uint32_t rsvd0:7; #else uint32_t rsvd0:7; uint32_t soft_reset:1; uint32_t exception_oe:1; uint32_t scratchpad0:1; uint32_t dbg_all_reqs_62_49:14; uint32_t nec_ll_rcvdetect:8; #endif } bits; } pcicfg_soft_reset_t; typedef union pcicfg_online1_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t host8_online:1; uint32_t host7_online:1; uint32_t host6_online:1; uint32_t host5_online:1; uint32_t host4_online:1; uint32_t host3_online:1; uint32_t host2_online:1; uint32_t ipc_online:1; uint32_t arm_online:1; uint32_t txp_online:1; uint32_t xaui_online:1; uint32_t rxpp_online:1; uint32_t txpb_online:1; uint32_t rr_online:1; uint32_t pmem_online:1; uint32_t pctl1_online:1; uint32_t pctl0_online:1; uint32_t pcs1online_online:1; uint32_t mpu_iram_online:1; uint32_t pcs0online_online:1; uint32_t mgmt_mac_online:1; uint32_t lpcmemhost_online:1; #else uint32_t lpcmemhost_online:1; uint32_t mgmt_mac_online:1; uint32_t pcs0online_online:1; uint32_t mpu_iram_online:1; uint32_t pcs1online_online:1; uint32_t pctl0_online:1; uint32_t pctl1_online:1; uint32_t pmem_online:1; uint32_t rr_online:1; uint32_t txpb_online:1; uint32_t rxpp_online:1; uint32_t xaui_online:1; uint32_t txp_online:1; uint32_t arm_online:1; uint32_t ipc_online:1; uint32_t host2_online:1; uint32_t host3_online:1; uint32_t host4_online:1; uint32_t host5_online:1; uint32_t host6_online:1; uint32_t host7_online:1; uint32_t host8_online:1; #endif } bits; } pcicfg_online1_t; typedef union mpu_ep_semaphore_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t error:1; uint32_t backup_fw:1; uint32_t iscsi_no_ip:1; uint32_t iscsi_ip_conflict:1; uint32_t option_rom_installed:1; uint32_t iscsi_drv_loaded:1; uint32_t rsvd0:10; uint32_t stage:16; #else uint32_t stage:16; uint32_t rsvd0:10; uint32_t iscsi_drv_loaded:1; uint32_t option_rom_installed:1; uint32_t iscsi_ip_conflict:1; uint32_t iscsi_no_ip:1; uint32_t backup_fw:1; uint32_t error:1; #endif } bits; } mpu_ep_semaphore_t; typedef union mpu_ep_control_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t cpu_reset:1; uint32_t rsvd1:15; uint32_t ep_ram_init_status:1; uint32_t 
rsvd0:12; uint32_t m2_rxpbuf:1; uint32_t m1_rxpbuf:1; uint32_t m0_rxpbuf:1; #else uint32_t m0_rxpbuf:1; uint32_t m1_rxpbuf:1; uint32_t m2_rxpbuf:1; uint32_t rsvd0:12; uint32_t ep_ram_init_status:1; uint32_t rsvd1:15; uint32_t cpu_reset:1; #endif } bits; } mpu_ep_control_t; /* RX doorbell */ typedef union pd_rxulp_db_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t num_posted:8; uint32_t invalidate:1; uint32_t rsvd1:13; uint32_t qid:10; #else uint32_t qid:10; uint32_t rsvd1:13; uint32_t invalidate:1; uint32_t num_posted:8; #endif } bits; } pd_rxulp_db_t; /* TX doorbell */ typedef union pd_txulp_db_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t rsvd1:2; uint32_t num_posted:14; uint32_t rsvd0:6; uint32_t qid:10; #else uint32_t qid:10; uint32_t rsvd0:6; uint32_t num_posted:14; uint32_t rsvd1:2; #endif } bits; } pd_txulp_db_t; /* CQ doorbell */ typedef union cq_db_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t rsvd1:2; uint32_t rearm:1; uint32_t num_popped:13; uint32_t rsvd0:5; uint32_t event:1; uint32_t qid:10; #else uint32_t qid:10; uint32_t event:1; uint32_t rsvd0:5; uint32_t num_popped:13; uint32_t rearm:1; uint32_t rsvd1:2; #endif } bits; } cq_db_t; /* EQ doorbell */ typedef union eq_db_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t rsvd1:2; uint32_t rearm:1; uint32_t num_popped:13; uint32_t rsvd0:5; uint32_t event:1; uint32_t clrint:1; uint32_t qid:9; #else uint32_t qid:9; uint32_t clrint:1; uint32_t event:1; uint32_t rsvd0:5; uint32_t num_popped:13; uint32_t rearm:1; uint32_t rsvd1:2; #endif } bits; } eq_db_t; /* bootstrap mbox doorbell */ typedef union pd_mpu_mbox_db_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t address:30; uint32_t hi:1; uint32_t ready:1; #else uint32_t ready:1; uint32_t hi:1; uint32_t address:30; #endif } bits; } pd_mpu_mbox_db_t; /* MQ ring doorbell */ typedef union pd_mq_db_u { uint32_t dw0; struct { #ifdef _BIG_ENDIAN uint32_t rsvd1:2; uint32_t num_posted:14; uint32_t rsvd0:5; uint32_t mq_id:11; #else uint32_t mq_id:11; uint32_t rsvd0:5; uint32_t num_posted:14; uint32_t rsvd1:2; #endif } bits; } pd_mq_db_t; /* * Event Queue Entry */ struct oce_eqe { uint32_t evnt; }; /* MQ scatter gather entry. 
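Each entry carries a 64-bit bus address split into pa_lo/pa_hi plus a byte length.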
Array of these make an SGL */ struct oce_mq_sge { uint32_t pa_lo; uint32_t pa_hi; uint32_t length; }; /* * payload can contain an SGL or an embedded array of upto 59 dwords */ struct oce_mbx_payload { union { union { struct oce_mq_sge sgl[MAX_MBX_SGE]; uint32_t embedded[59]; } u1; uint32_t dw[59]; } u0; }; /* * MQ MBX structure */ struct oce_mbx { union { struct { #ifdef _BIG_ENDIAN uint32_t special:8; uint32_t rsvd1:16; uint32_t sge_count:5; uint32_t rsvd0:2; uint32_t embedded:1; #else uint32_t embedded:1; uint32_t rsvd0:2; uint32_t sge_count:5; uint32_t rsvd1:16; uint32_t special:8; #endif } s; uint32_t dw0; } u0; uint32_t payload_length; uint32_t tag[2]; uint32_t rsvd2[1]; struct oce_mbx_payload payload; }; /* completion queue entry for MQ */ struct oce_mq_cqe { union { struct { #ifdef _BIG_ENDIAN /* dw0 */ uint32_t extended_status:16; uint32_t completion_status:16; /* dw1 dw2 */ uint32_t mq_tag[2]; /* dw3 */ uint32_t valid:1; uint32_t async_event:1; uint32_t hpi_buffer_cmpl:1; uint32_t completed:1; uint32_t consumed:1; uint32_t rsvd0:3; uint32_t async_type:8; uint32_t event_type:8; uint32_t rsvd1:8; #else /* dw0 */ uint32_t completion_status:16; uint32_t extended_status:16; /* dw1 dw2 */ uint32_t mq_tag[2]; /* dw3 */ uint32_t rsvd1:8; uint32_t event_type:8; uint32_t async_type:8; uint32_t rsvd0:3; uint32_t consumed:1; uint32_t completed:1; uint32_t hpi_buffer_cmpl:1; uint32_t async_event:1; uint32_t valid:1; #endif } s; uint32_t dw[4]; } u0; }; /* Mailbox Completion Status Codes */ enum MBX_COMPLETION_STATUS { MBX_CQE_STATUS_SUCCESS = 0x00, MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 0x01, MBX_CQE_STATUS_INVALID_PARAMETER = 0x02, MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 0x03, MBX_CQE_STATUS_QUEUE_FLUSHING = 0x04, MBX_CQE_STATUS_DMA_FAILED = 0x05 }; struct oce_async_cqe_link_state { union { struct { #ifdef _BIG_ENDIAN /* dw0 */ uint8_t speed; uint8_t duplex; uint8_t link_status; uint8_t phy_port; /* dw1 */ uint16_t qos_link_speed; uint8_t rsvd0; uint8_t fault; /* dw2 */ uint32_t event_tag; /* dw3 */ uint32_t valid:1; uint32_t async_event:1; uint32_t rsvd2:6; uint32_t event_type:8; uint32_t event_code:8; uint32_t rsvd1:8; #else /* dw0 */ uint8_t phy_port; uint8_t link_status; uint8_t duplex; uint8_t speed; /* dw1 */ uint8_t fault; uint8_t rsvd0; uint16_t qos_link_speed; /* dw2 */ uint32_t event_tag; /* dw3 */ uint32_t rsvd1:8; uint32_t event_code:8; uint32_t event_type:8; uint32_t rsvd2:6; uint32_t async_event:1; uint32_t valid:1; #endif } s; uint32_t dw[4]; } u0; }; +/* OS2BMC async event */ +struct oce_async_evt_grp5_os2bmc { + union { + struct { + uint32_t lrn_enable:1; + uint32_t lrn_disable:1; + uint32_t mgmt_enable:1; + uint32_t mgmt_disable:1; + uint32_t rsvd0:12; + uint32_t vlan_tag:16; + uint32_t arp_filter:1; + uint32_t dhcp_client_filt:1; + uint32_t dhcp_server_filt:1; + uint32_t net_bios_filt:1; + uint32_t rsvd1:3; + uint32_t bcast_filt:1; + uint32_t ipv6_nbr_filt:1; + uint32_t ipv6_ra_filt:1; + uint32_t ipv6_ras_filt:1; + uint32_t rsvd2[4]; + uint32_t mcast_filt:1; + uint32_t rsvd3:16; + uint32_t evt_tag; + uint32_t dword3; + } s; + uint32_t dword[4]; + } u; +}; /* PVID aync event */ struct oce_async_event_grp5_pvid_state { uint8_t enabled; uint8_t rsvd0; uint16_t tag; uint32_t event_tag; uint32_t rsvd1; uint32_t code; }; /* async event indicating outer VLAN tag in QnQ */ struct oce_async_event_qnq { uint8_t valid; /* Indicates if outer VLAN is valid */ uint8_t rsvd0; uint16_t vlan_tag; uint32_t event_tag; uint8_t rsvd1[4]; uint32_t code; } ; typedef union oce_mq_ext_ctx_u { 
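/* extended MQ context: layout v0 carries 10-bit CQ ids, layout v1 widens them to 16 bits */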
uint32_t dw[6]; struct { #ifdef _BIG_ENDIAN /* dw0 */ uint32_t dw4rsvd1:16; uint32_t num_pages:16; /* dw1 */ uint32_t async_evt_bitmap; /* dw2 */ uint32_t cq_id:10; uint32_t dw5rsvd2:2; uint32_t ring_size:4; uint32_t dw5rsvd1:16; /* dw3 */ uint32_t valid:1; uint32_t dw6rsvd1:31; /* dw4 */ uint32_t dw7rsvd1:21; uint32_t async_cq_id:10; uint32_t async_cq_valid:1; #else /* dw0 */ uint32_t num_pages:16; uint32_t dw4rsvd1:16; /* dw1 */ uint32_t async_evt_bitmap; /* dw2 */ uint32_t dw5rsvd1:16; uint32_t ring_size:4; uint32_t dw5rsvd2:2; uint32_t cq_id:10; /* dw3 */ uint32_t dw6rsvd1:31; uint32_t valid:1; /* dw4 */ uint32_t async_cq_valid:1; uint32_t async_cq_id:10; uint32_t dw7rsvd1:21; #endif /* dw5 */ uint32_t dw8rsvd1; } v0; struct { #ifdef _BIG_ENDIAN /* dw0 */ uint32_t cq_id:16; uint32_t num_pages:16; /* dw1 */ uint32_t async_evt_bitmap; /* dw2 */ uint32_t dw5rsvd2:12; uint32_t ring_size:4; uint32_t async_cq_id:16; /* dw3 */ uint32_t valid:1; uint32_t dw6rsvd1:31; /* dw4 */ uint32_t dw7rsvd1:31; uint32_t async_cq_valid:1; #else /* dw0 */ uint32_t num_pages:16; uint32_t cq_id:16; /* dw1 */ uint32_t async_evt_bitmap; /* dw2 */ uint32_t async_cq_id:16; uint32_t ring_size:4; uint32_t dw5rsvd2:12; /* dw3 */ uint32_t dw6rsvd1:31; uint32_t valid:1; /* dw4 */ uint32_t async_cq_valid:1; uint32_t dw7rsvd1:31; #endif /* dw5 */ uint32_t dw8rsvd1; } v1; } oce_mq_ext_ctx_t; /* MQ mailbox structure */ struct oce_bmbx { struct oce_mbx mbx; struct oce_mq_cqe cqe; }; /* ---[ MBXs start here ]---------------------------------------------- */ /* MBXs sub system codes */ enum MBX_SUBSYSTEM_CODES { MBX_SUBSYSTEM_RSVD = 0, MBX_SUBSYSTEM_COMMON = 1, MBX_SUBSYSTEM_COMMON_ISCSI = 2, MBX_SUBSYSTEM_NIC = 3, MBX_SUBSYSTEM_TOE = 4, MBX_SUBSYSTEM_PXE_UNDI = 5, MBX_SUBSYSTEM_ISCSI_INI = 6, MBX_SUBSYSTEM_ISCSI_TGT = 7, MBX_SUBSYSTEM_MILI_PTL = 8, MBX_SUBSYSTEM_MILI_TMD = 9, MBX_SUBSYSTEM_RDMA = 10, MBX_SUBSYSTEM_LOWLEVEL = 11, MBX_SUBSYSTEM_LRO = 13, IOCBMBX_SUBSYSTEM_DCBX = 15, IOCBMBX_SUBSYSTEM_DIAG = 16, IOCBMBX_SUBSYSTEM_VENDOR = 17 }; /* common ioctl opcodes */ enum COMMON_SUBSYSTEM_OPCODES { /* These opcodes are common to both networking and storage PCI functions * They are used to reserve resources and configure CNA. These opcodes * all use the MBX_SUBSYSTEM_COMMON subsystem code. 
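* The enumerator values are the wire opcode numbers carried in mbx_hdr.u0.req.opcode.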
*/ OPCODE_COMMON_QUERY_IFACE_MAC = 1, OPCODE_COMMON_SET_IFACE_MAC = 2, OPCODE_COMMON_SET_IFACE_MULTICAST = 3, OPCODE_COMMON_CONFIG_IFACE_VLAN = 4, OPCODE_COMMON_QUERY_LINK_CONFIG = 5, OPCODE_COMMON_READ_FLASHROM = 6, OPCODE_COMMON_WRITE_FLASHROM = 7, OPCODE_COMMON_QUERY_MAX_MBX_BUFFER_SIZE = 8, OPCODE_COMMON_CREATE_CQ = 12, OPCODE_COMMON_CREATE_EQ = 13, OPCODE_COMMON_CREATE_MQ = 21, OPCODE_COMMON_GET_QOS = 27, OPCODE_COMMON_SET_QOS = 28, OPCODE_COMMON_READ_EPROM = 30, OPCODE_COMMON_GET_CNTL_ATTRIBUTES = 32, OPCODE_COMMON_NOP = 33, OPCODE_COMMON_SET_IFACE_RX_FILTER = 34, OPCODE_COMMON_GET_FW_VERSION = 35, OPCODE_COMMON_SET_FLOW_CONTROL = 36, OPCODE_COMMON_GET_FLOW_CONTROL = 37, OPCODE_COMMON_SET_FRAME_SIZE = 39, OPCODE_COMMON_MODIFY_EQ_DELAY = 41, OPCODE_COMMON_CREATE_IFACE = 50, OPCODE_COMMON_DESTROY_IFACE = 51, OPCODE_COMMON_MODIFY_MSI_MESSAGES = 52, OPCODE_COMMON_DESTROY_MQ = 53, OPCODE_COMMON_DESTROY_CQ = 54, OPCODE_COMMON_DESTROY_EQ = 55, OPCODE_COMMON_UPLOAD_TCP = 56, OPCODE_COMMON_SET_NTWK_LINK_SPEED = 57, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG = 58, OPCODE_COMMON_ADD_IFACE_MAC = 59, OPCODE_COMMON_DEL_IFACE_MAC = 60, OPCODE_COMMON_FUNCTION_RESET = 61, OPCODE_COMMON_SET_PHYSICAL_LINK_CONFIG = 62, OPCODE_COMMON_GET_BOOT_CONFIG = 66, OPCPDE_COMMON_SET_BOOT_CONFIG = 67, OPCODE_COMMON_SET_BEACON_CONFIG = 69, OPCODE_COMMON_GET_BEACON_CONFIG = 70, OPCODE_COMMON_GET_PHYSICAL_LINK_CONFIG = 71, OPCODE_COMMON_READ_TRANSRECEIVER_DATA = 73, OPCODE_COMMON_GET_OEM_ATTRIBUTES = 76, OPCODE_COMMON_GET_PORT_NAME = 77, OPCODE_COMMON_GET_CONFIG_SIGNATURE = 78, OPCODE_COMMON_SET_CONFIG_SIGNATURE = 79, OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG = 80, OPCODE_COMMON_GET_BE_CONFIGURATION_RESOURCES = 81, OPCODE_COMMON_SET_BE_CONFIGURATION_RESOURCES = 82, OPCODE_COMMON_GET_RESET_NEEDED = 84, OPCODE_COMMON_GET_SERIAL_NUMBER = 85, OPCODE_COMMON_GET_NCSI_CONFIG = 86, OPCODE_COMMON_SET_NCSI_CONFIG = 87, OPCODE_COMMON_CREATE_MQ_EXT = 90, OPCODE_COMMON_SET_FUNCTION_PRIVILEGES = 100, OPCODE_COMMON_SET_VF_PORT_TYPE = 101, OPCODE_COMMON_GET_PHY_CONFIG = 102, OPCODE_COMMON_SET_FUNCTIONAL_CAPS = 103, OPCODE_COMMON_GET_ADAPTER_ID = 110, OPCODE_COMMON_GET_UPGRADE_FEATURES = 111, OPCODE_COMMON_GET_INSTALLED_FEATURES = 112, OPCODE_COMMON_GET_AVAIL_PERSONALITIES = 113, OPCODE_COMMON_GET_CONFIG_PERSONALITIES = 114, OPCODE_COMMON_SEND_ACTIVATION = 115, OPCODE_COMMON_RESET_LICENSES = 116, OPCODE_COMMON_GET_CNTL_ADDL_ATTRIBUTES = 121, OPCODE_COMMON_QUERY_TCB = 144, OPCODE_COMMON_ADD_IFACE_QUEUE_FILTER = 145, OPCODE_COMMON_DEL_IFACE_QUEUE_FILTER = 146, OPCODE_COMMON_GET_IFACE_MAC_LIST = 147, OPCODE_COMMON_SET_IFACE_MAC_LIST = 148, OPCODE_COMMON_MODIFY_CQ = 149, OPCODE_COMMON_GET_IFACE_VLAN_LIST = 150, OPCODE_COMMON_SET_IFACE_VLAN_LIST = 151, OPCODE_COMMON_GET_HSW_CONFIG = 152, OPCODE_COMMON_SET_HSW_CONFIG = 153, OPCODE_COMMON_GET_RESOURCE_EXTENT_INFO = 154, OPCODE_COMMON_GET_ALLOCATED_RESOURCE_EXTENTS = 155, OPCODE_COMMON_ALLOC_RESOURCE_EXTENTS = 156, OPCODE_COMMON_DEALLOC_RESOURCE_EXTENTS = 157, OPCODE_COMMON_SET_DIAG_REGISTERS = 158, OPCODE_COMMON_GET_FUNCTION_CONFIG = 160, OPCODE_COMMON_GET_PROFILE_CAPACITIES = 161, OPCODE_COMMON_GET_MR_PROFILE_CAPACITIES = 162, OPCODE_COMMON_SET_MR_PROFILE_CAPACITIES = 163, OPCODE_COMMON_GET_PROFILE_CONFIG = 164, OPCODE_COMMON_SET_PROFILE_CONFIG = 165, OPCODE_COMMON_GET_PROFILE_LIST = 166, OPCODE_COMMON_GET_ACTIVE_PROFILE = 167, OPCODE_COMMON_SET_ACTIVE_PROFILE = 168, OPCODE_COMMON_GET_FUNCTION_PRIVILEGES = 170, OPCODE_COMMON_READ_OBJECT = 171, OPCODE_COMMON_WRITE_OBJECT = 172 }; /* common ioctl header 
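* (struct mbx_hdr): prefixes every command; the request form packs opcode, subsystem, port_number and domain into dw0, and the response form returns status and additional_status.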
*/ #define OCE_MBX_VER_V2 0x0002 /* Version V2 mailbox command */ #define OCE_MBX_VER_V1 0x0001 /* Version V1 mailbox command */ #define OCE_MBX_VER_V0 0x0000 /* Version V0 mailbox command */ struct mbx_hdr { union { uint32_t dw[4]; struct { #ifdef _BIG_ENDIAN /* dw 0 */ uint32_t domain:8; uint32_t port_number:8; uint32_t subsystem:8; uint32_t opcode:8; /* dw 1 */ uint32_t timeout; /* dw 2 */ uint32_t request_length; /* dw 3 */ uint32_t rsvd0:24; uint32_t version:8; #else /* dw 0 */ uint32_t opcode:8; uint32_t subsystem:8; uint32_t port_number:8; uint32_t domain:8; /* dw 1 */ uint32_t timeout; /* dw 2 */ uint32_t request_length; /* dw 3 */ uint32_t version:8; uint32_t rsvd0:24; #endif } req; struct { #ifdef _BIG_ENDIAN /* dw 0 */ uint32_t domain:8; uint32_t rsvd0:8; uint32_t subsystem:8; uint32_t opcode:8; /* dw 1 */ uint32_t rsvd1:16; uint32_t additional_status:8; uint32_t status:8; #else /* dw 0 */ uint32_t opcode:8; uint32_t subsystem:8; uint32_t rsvd0:8; uint32_t domain:8; /* dw 1 */ uint32_t status:8; uint32_t additional_status:8; uint32_t rsvd1:16; #endif uint32_t rsp_length; uint32_t actual_rsp_length; } rsp; } u0; }; #define OCE_BMBX_RHDR_SZ 20 #define OCE_MBX_RRHDR_SZ sizeof (struct mbx_hdr) #define OCE_MBX_ADDL_STATUS(_MHDR) ((_MHDR)->u0.rsp.additional_status) #define OCE_MBX_STATUS(_MHDR) ((_MHDR)->u0.rsp.status) /* [05] OPCODE_COMMON_QUERY_LINK_CONFIG_V1 */ struct mbx_query_common_link_config { struct mbx_hdr hdr; union { struct { uint32_t rsvd0; } req; struct { #ifdef _BIG_ENDIAN uint32_t physical_port_fault:8; uint32_t physical_port_speed:8; uint32_t link_duplex:8; uint32_t pt:2; uint32_t port_number:6; uint16_t qos_link_speed; uint16_t rsvd0; uint32_t rsvd1:21; uint32_t phys_fcv:1; uint32_t phys_rxf:1; uint32_t phys_txf:1; uint32_t logical_link_status:8; #else uint32_t port_number:6; uint32_t pt:2; uint32_t link_duplex:8; uint32_t physical_port_speed:8; uint32_t physical_port_fault:8; uint16_t rsvd0; uint16_t qos_link_speed; uint32_t logical_link_status:8; uint32_t phys_txf:1; uint32_t phys_rxf:1; uint32_t phys_fcv:1; uint32_t rsvd1:21; #endif } rsp; } params; }; /* [57] OPCODE_COMMON_SET_LINK_SPEED */ struct mbx_set_common_link_speed { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN uint8_t rsvd0; uint8_t mac_speed; uint8_t virtual_port; uint8_t physical_port; #else uint8_t physical_port; uint8_t virtual_port; uint8_t mac_speed; uint8_t rsvd0; #endif } req; struct { uint32_t rsvd0; } rsp; uint32_t dw; } params; }; struct mac_address_format { uint16_t size_of_struct; uint8_t mac_addr[6]; }; /* [01] OPCODE_COMMON_QUERY_IFACE_MAC */ struct mbx_query_common_iface_mac { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN uint16_t if_id; uint8_t permanent; uint8_t type; #else uint8_t type; uint8_t permanent; uint16_t if_id; #endif } req; struct { struct mac_address_format mac; } rsp; } params; }; /* [02] OPCODE_COMMON_SET_IFACE_MAC */ struct mbx_set_common_iface_mac { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN /* dw 0 */ uint16_t if_id; uint8_t invalidate; uint8_t type; #else /* dw 0 */ uint8_t type; uint8_t invalidate; uint16_t if_id; #endif /* dw 1 */ struct mac_address_format mac; } req; struct { uint32_t rsvd0; } rsp; uint32_t dw[2]; } params; }; /* [03] OPCODE_COMMON_SET_IFACE_MULTICAST */ struct mbx_set_common_iface_multicast { struct mbx_hdr hdr; union { struct { /* dw 0 */ uint16_t num_mac; uint8_t promiscuous; uint8_t if_id; /* dw 1-48 */ struct { uint8_t byte[6]; } mac[32]; } req; struct { uint32_t rsvd0; } rsp; uint32_t dw[49]; } params; }; 
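/*
 * Illustrative sketch, not part of the driver build (kept under #if 0):
 * how oce_hw_update_multicast() in oce_hw.c fills this request. "req",
 * "dma" and "sc" are the locals of that function; "lladdr" stands in for
 * the link-level address of one ifmultiaddr entry.
 */
#if 0
	req = OCE_DMAPTR(&dma, struct mbx_set_common_iface_multicast);
	bzero(req, sizeof(struct mbx_set_common_iface_multicast));
	/* one 6-byte MAC per slot, up to OCE_MAX_MC_FILTER_SIZE entries */
	if (req->params.req.num_mac == OCE_MAX_MC_FILTER_SIZE)
		req->params.req.promiscuous = 1;	/* table full: accept all multicast */
	else
		bcopy(lladdr, &req->params.req.mac[req->params.req.num_mac++],
		    ETH_ADDR_LEN);
	req->params.req.if_id = sc->if_id;
#endif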
struct qinq_vlan { #ifdef _BIG_ENDIAN uint16_t inner; uint16_t outer; #else uint16_t outer; uint16_t inner; #endif }; struct normal_vlan { uint16_t vtag; }; struct ntwk_if_vlan_tag { union { struct normal_vlan normal; struct qinq_vlan qinq; } u0; }; /* [50] OPCODE_COMMON_CREATE_IFACE */ struct mbx_create_common_iface { struct mbx_hdr hdr; union { struct { uint32_t version; uint32_t cap_flags; uint32_t enable_flags; uint8_t mac_addr[6]; uint8_t rsvd0; uint8_t mac_invalid; struct ntwk_if_vlan_tag vlan_tag; } req; struct { uint32_t if_id; uint32_t pmac_id; } rsp; uint32_t dw[4]; } params; }; /* [51] OPCODE_COMMON_DESTROY_IFACE */ struct mbx_destroy_common_iface { struct mbx_hdr hdr; union { struct { uint32_t if_id; } req; struct { uint32_t rsvd0; } rsp; uint32_t dw; } params; }; /* event queue context structure */ struct oce_eq_ctx { #ifdef _BIG_ENDIAN uint32_t dw4rsvd1:16; uint32_t num_pages:16; uint32_t size:1; uint32_t dw5rsvd2:1; uint32_t valid:1; uint32_t dw5rsvd1:29; uint32_t armed:1; uint32_t dw6rsvd2:2; uint32_t count:3; uint32_t dw6rsvd1:26; uint32_t dw7rsvd2:9; uint32_t delay_mult:10; uint32_t dw7rsvd1:13; uint32_t dw8rsvd1; #else uint32_t num_pages:16; uint32_t dw4rsvd1:16; uint32_t dw5rsvd1:29; uint32_t valid:1; uint32_t dw5rsvd2:1; uint32_t size:1; uint32_t dw6rsvd1:26; uint32_t count:3; uint32_t dw6rsvd2:2; uint32_t armed:1; uint32_t dw7rsvd1:13; uint32_t delay_mult:10; uint32_t dw7rsvd2:9; uint32_t dw8rsvd1; #endif }; /* [13] OPCODE_COMMON_CREATE_EQ */ struct mbx_create_common_eq { struct mbx_hdr hdr; union { struct { struct oce_eq_ctx ctx; struct phys_addr pages[8]; } req; struct { uint16_t eq_id; uint16_t rsvd0; } rsp; } params; }; /* [55] OPCODE_COMMON_DESTROY_EQ */ struct mbx_destroy_common_eq { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN uint16_t rsvd0; uint16_t id; #else uint16_t id; uint16_t rsvd0; #endif } req; struct { uint32_t rsvd0; } rsp; } params; }; /* SLI-4 CQ context - use version V0 for B3, version V2 for Lancer */ typedef union oce_cq_ctx_u { uint32_t dw[5]; struct { #ifdef _BIG_ENDIAN /* dw4 */ uint32_t dw4rsvd1:16; uint32_t num_pages:16; /* dw5 */ uint32_t eventable:1; uint32_t dw5rsvd3:1; uint32_t valid:1; uint32_t count:2; uint32_t dw5rsvd2:12; uint32_t nodelay:1; uint32_t coalesce_wm:2; uint32_t dw5rsvd1:12; /* dw6 */ uint32_t armed:1; uint32_t dw6rsvd2:1; uint32_t eq_id:8; uint32_t dw6rsvd1:22; #else /* dw4 */ uint32_t num_pages:16; uint32_t dw4rsvd1:16; /* dw5 */ uint32_t dw5rsvd1:12; uint32_t coalesce_wm:2; uint32_t nodelay:1; uint32_t dw5rsvd2:12; uint32_t count:2; uint32_t valid:1; uint32_t dw5rsvd3:1; uint32_t eventable:1; /* dw6 */ uint32_t dw6rsvd1:22; uint32_t eq_id:8; uint32_t dw6rsvd2:1; uint32_t armed:1; #endif /* dw7 */ uint32_t dw7rsvd1; /* dw8 */ uint32_t dw8rsvd1; } v0; struct { #ifdef _BIG_ENDIAN /* dw4 */ uint32_t dw4rsvd1:8; uint32_t page_size:8; uint32_t num_pages:16; /* dw5 */ uint32_t eventable:1; uint32_t dw5rsvd3:1; uint32_t valid:1; uint32_t count:2; uint32_t dw5rsvd2:11; uint32_t autovalid:1; uint32_t nodelay:1; uint32_t coalesce_wm:2; uint32_t dw5rsvd1:12; /* dw6 */ uint32_t armed:1; uint32_t dw6rsvd1:15; uint32_t eq_id:16; /* dw7 */ uint32_t dw7rsvd1:16; uint32_t cqe_count:16; #else /* dw4 */ uint32_t num_pages:16; uint32_t page_size:8; uint32_t dw4rsvd1:8; /* dw5 */ uint32_t dw5rsvd1:12; uint32_t coalesce_wm:2; uint32_t nodelay:1; uint32_t autovalid:1; uint32_t dw5rsvd2:11; uint32_t count:2; uint32_t valid:1; uint32_t dw5rsvd3:1; uint32_t eventable:1; /* dw6 */ - uint32_t eq_id:8; + uint32_t eq_id:16; uint32_t 
dw6rsvd1:15; uint32_t armed:1; /* dw7 */ uint32_t cqe_count:16; uint32_t dw7rsvd1:16; #endif /* dw8 */ uint32_t dw8rsvd1; } v2; } oce_cq_ctx_t; /* [12] OPCODE_COMMON_CREATE_CQ */ struct mbx_create_common_cq { struct mbx_hdr hdr; union { struct { oce_cq_ctx_t cq_ctx; struct phys_addr pages[4]; } req; struct { uint16_t cq_id; uint16_t rsvd0; } rsp; } params; }; /* [54] OPCODE_COMMON_DESTROY_CQ */ struct mbx_destroy_common_cq { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN uint16_t rsvd0; uint16_t id; #else uint16_t id; uint16_t rsvd0; #endif } req; struct { uint32_t rsvd0; } rsp; } params; }; typedef union oce_mq_ctx_u { uint32_t dw[5]; struct { #ifdef _BIG_ENDIAN /* dw4 */ uint32_t dw4rsvd1:16; uint32_t num_pages:16; /* dw5 */ uint32_t cq_id:10; uint32_t dw5rsvd2:2; uint32_t ring_size:4; uint32_t dw5rsvd1:16; /* dw6 */ uint32_t valid:1; uint32_t dw6rsvd1:31; /* dw7 */ uint32_t dw7rsvd1:21; uint32_t async_cq_id:10; uint32_t async_cq_valid:1; #else /* dw4 */ uint32_t num_pages:16; uint32_t dw4rsvd1:16; /* dw5 */ uint32_t dw5rsvd1:16; uint32_t ring_size:4; uint32_t dw5rsvd2:2; uint32_t cq_id:10; /* dw6 */ uint32_t dw6rsvd1:31; uint32_t valid:1; /* dw7 */ uint32_t async_cq_valid:1; uint32_t async_cq_id:10; uint32_t dw7rsvd1:21; #endif /* dw8 */ uint32_t dw8rsvd1; } v0; } oce_mq_ctx_t; /** * @brief [21] OPCODE_COMMON_CREATE_MQ * A MQ must be at least 16 entries deep (corresponding to 1 page) and * at most 128 entries deep (corresponding to 8 pages). */ struct mbx_create_common_mq { struct mbx_hdr hdr; union { struct { oce_mq_ctx_t context; struct phys_addr pages[8]; } req; struct { uint32_t mq_id:16; uint32_t rsvd0:16; } rsp; } params; }; struct mbx_create_common_mq_ex { struct mbx_hdr hdr; union { struct { oce_mq_ext_ctx_t context; struct phys_addr pages[8]; } req; struct { uint32_t mq_id:16; uint32_t rsvd0:16; } rsp; } params; }; /* [53] OPCODE_COMMON_DESTROY_MQ */ struct mbx_destroy_common_mq { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN uint16_t rsvd0; uint16_t id; #else uint16_t id; uint16_t rsvd0; #endif } req; struct { uint32_t rsvd0; } rsp; } params; }; /* [35] OPCODE_COMMON_GET_ FW_VERSION */ struct mbx_get_common_fw_version { struct mbx_hdr hdr; union { struct { uint32_t rsvd0; } req; struct { uint8_t fw_ver_str[32]; uint8_t fw_on_flash_ver_str[32]; } rsp; } params; }; /* [52] OPCODE_COMMON_CEV_MODIFY_MSI_MESSAGES */ struct mbx_common_cev_modify_msi_messages { struct mbx_hdr hdr; union { struct { uint32_t num_msi_msgs; } req; struct { uint32_t rsvd0; } rsp; } params; }; /* [36] OPCODE_COMMON_SET_FLOW_CONTROL */ /* [37] OPCODE_COMMON_GET_FLOW_CONTROL */ struct mbx_common_get_set_flow_control { struct mbx_hdr hdr; #ifdef _BIG_ENDIAN uint16_t tx_flow_control; uint16_t rx_flow_control; #else uint16_t rx_flow_control; uint16_t tx_flow_control; #endif }; enum e_flash_opcode { MGMT_FLASHROM_OPCODE_FLASH = 1, MGMT_FLASHROM_OPCODE_SAVE = 2 }; /* [06] OPCODE_READ_COMMON_FLASHROM */ /* [07] OPCODE_WRITE_COMMON_FLASHROM */ struct mbx_common_read_write_flashrom { struct mbx_hdr hdr; uint32_t flash_op_code; uint32_t flash_op_type; uint32_t data_buffer_size; uint32_t data_offset; uint8_t data_buffer[32768]; /* + IMAGE_TRANSFER_SIZE */ uint8_t rsvd[4]; }; struct oce_phy_info { uint16_t phy_type; uint16_t interface_type; uint32_t misc_params; uint16_t ext_phy_details; uint16_t rsvd; uint16_t auto_speeds_supported; uint16_t fixed_speeds_supported; uint32_t future_use[2]; }; struct mbx_common_phy_info { struct mbx_hdr hdr; union { struct { uint32_t rsvd0[4]; } req; struct { struct 
oce_phy_info phy_info; } rsp; } params; }; /*Lancer firmware*/ struct mbx_lancer_common_write_object { union { struct { struct mbx_hdr hdr; uint32_t write_length: 24; uint32_t rsvd: 7; uint32_t eof: 1; uint32_t write_offset; uint8_t object_name[104]; uint32_t descriptor_count; uint32_t buffer_length; uint32_t address_lower; uint32_t address_upper; } req; struct { uint8_t opcode; uint8_t subsystem; uint8_t rsvd1[2]; uint8_t status; uint8_t additional_status; uint8_t rsvd2[2]; uint32_t response_length; uint32_t actual_response_length; uint32_t actual_write_length; } rsp; } params; }; /** * @brief MBX Common Quiery Firmaware Config * This command retrieves firmware configuration parameters and adapter * resources available to the driver originating the request. The firmware * configuration defines supported protocols by the installed adapter firmware. * This includes which ULP processors support the specified protocols and * the number of TCP connections allowed for that protocol. */ struct mbx_common_query_fw_config { struct mbx_hdr hdr; union { struct { uint32_t rsvd0[30]; } req; struct { uint32_t config_number; uint32_t asic_revision; uint32_t port_id; /* used for stats retrieval */ uint32_t function_mode; struct { uint32_t ulp_mode; uint32_t nic_wqid_base; uint32_t nic_wq_tot; uint32_t toe_wqid_base; uint32_t toe_wq_tot; uint32_t toe_rqid_base; uint32_t toe_rqid_tot; uint32_t toe_defrqid_base; uint32_t toe_defrqid_count; uint32_t lro_rqid_base; uint32_t lro_rqid_tot; uint32_t iscsi_icd_base; uint32_t iscsi_icd_count; } ulp[2]; uint32_t function_caps; uint32_t cqid_base; uint32_t cqid_tot; uint32_t eqid_base; uint32_t eqid_tot; } rsp; } params; }; enum CQFW_CONFIG_NUMBER { FCN_NIC_ISCSI_Initiator = 0x0, FCN_ISCSI_Target = 0x3, FCN_FCoE = 0x7, FCN_ISCSI_Initiator_Target = 0x9, FCN_NIC_RDMA_TOE = 0xA, FCN_NIC_RDMA_FCoE = 0xB, FCN_NIC_RDMA_iSCSI = 0xC, FCN_NIC_iSCSI_FCoE = 0xD }; /** * @brief Function Capabilites * This field contains the flags indicating the capabilities of * the SLI Host’s PCI function. */ enum CQFW_FUNCTION_CAPABILITIES { FNC_UNCLASSIFIED_STATS = 0x1, FNC_RSS = 0x2, FNC_PROMISCUOUS = 0x4, FNC_LEGACY_MODE = 0x8, FNC_HDS = 0x4000, FNC_VMQ = 0x10000, FNC_NETQ = 0x20000, FNC_QGROUPS = 0x40000, FNC_LRO = 0x100000, FNC_VLAN_OFFLOAD = 0x800000 }; enum CQFW_ULP_MODES_SUPPORTED { ULP_TOE_MODE = 0x1, ULP_NIC_MODE = 0x2, ULP_RDMA_MODE = 0x4, ULP_ISCSI_INI_MODE = 0x10, ULP_ISCSI_TGT_MODE = 0x20, ULP_FCOE_INI_MODE = 0x40, ULP_FCOE_TGT_MODE = 0x80, ULP_DAL_MODE = 0x100, ULP_LRO_MODE = 0x200 }; /** * @brief Function Modes Supported * Valid function modes (or protocol-types) supported on the SLI-Host’s * PCIe function. 
This field is a logical OR of the following values: */ enum CQFW_FUNCTION_MODES_SUPPORTED { FNM_TOE_MODE = 0x1, /* TCP offload supported */ FNM_NIC_MODE = 0x2, /* Raw Ethernet supported */ FNM_RDMA_MODE = 0x4, /* RDMA protocol supported */ FNM_VM_MODE = 0x8, /* Virtual Machines supported */ FNM_ISCSI_INI_MODE = 0x10, /* iSCSI initiator supported */ FNM_ISCSI_TGT_MODE = 0x20, /* iSCSI target plus initiator */ FNM_FCOE_INI_MODE = 0x40, /* FCoE Initiator supported */ FNM_FCOE_TGT_MODE = 0x80, /* FCoE target supported */ FNM_DAL_MODE = 0x100, /* DAL supported */ FNM_LRO_MODE = 0x200, /* LRO supported */ FNM_FLEX10_MODE = 0x400, /* QinQ, FLEX-10 or VNIC */ FNM_NCSI_MODE = 0x800, /* NCSI supported */ FNM_IPV6_MODE = 0x1000, /* IPV6 stack enabled */ FNM_BE2_COMPAT_MODE = 0x2000, /* BE2 compatibility (BE3 disable)*/ FNM_INVALID_MODE = 0x8000, /* Invalid */ FNM_BE3_COMPAT_MODE = 0x10000, /* BE3 features */ FNM_VNIC_MODE = 0x20000, /* Set when IBM vNIC mode is set */ FNM_VNTAG_MODE = 0x40000, /* Set when VNTAG mode is set */ FNM_UMC_MODE = 0x1000000, /* Set when UMC mode is set */ FNM_UMC_DEF_EN = 0x100000, /* Set when UMC Default is set */ FNM_ONE_GB_EN = 0x200000, /* Set when 1GB Default is set */ FNM_VNIC_DEF_VALID = 0x400000, /* Set when VNIC_DEF_EN is valid */ FNM_VNIC_DEF_EN = 0x800000 /* Set when VNIC Default enabled */ }; struct mbx_common_config_vlan { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN uint8_t num_vlans; uint8_t untagged; uint8_t promisc; uint8_t if_id; #else uint8_t if_id; uint8_t promisc; uint8_t untagged; uint8_t num_vlans; #endif union { struct normal_vlan normal_vlans[64]; struct qinq_vlan qinq_vlans[32]; } tags; } req; struct { uint32_t rsvd; } rsp; } params; }; typedef struct iface_rx_filter_ctx { uint32_t global_flags_mask; uint32_t global_flags; uint32_t iface_flags_mask; uint32_t iface_flags; uint32_t if_id; #define IFACE_RX_NUM_MCAST_MAX 64 uint32_t num_mcast; struct mbx_mcast_addr { uint8_t byte[6]; } mac[IFACE_RX_NUM_MCAST_MAX]; } iface_rx_filter_ctx_t; /* [34] OPCODE_COMMON_SET_IFACE_RX_FILTER */ struct mbx_set_common_iface_rx_filter { struct mbx_hdr hdr; union { iface_rx_filter_ctx_t req; iface_rx_filter_ctx_t rsp; } params; }; struct be_set_eqd { uint32_t eq_id; uint32_t phase; uint32_t dm; }; /* [41] OPCODE_COMMON_MODIFY_EQ_DELAY */ struct mbx_modify_common_eq_delay { struct mbx_hdr hdr; union { struct { uint32_t num_eq; struct { uint32_t eq_id; uint32_t phase; uint32_t dm; } delay[8]; } req; struct { uint32_t rsvd0; } rsp; } params; }; /* [32] OPCODE_COMMON_GET_CNTL_ATTRIBUTES */ struct mgmt_hba_attr { int8_t flashrom_ver_str[32]; int8_t manufac_name[32]; uint32_t supp_modes; int8_t seeprom_ver_lo; int8_t seeprom_ver_hi; int8_t rsvd0[2]; uint32_t ioctl_data_struct_ver; uint32_t ep_fw_data_struct_ver; uint8_t ncsi_ver_str[12]; uint32_t def_ext_to; int8_t cntl_mod_num[32]; int8_t cntl_desc[64]; int8_t cntl_ser_num[32]; int8_t ip_ver_str[32]; int8_t fw_ver_str[32]; int8_t bios_ver_str[32]; int8_t redboot_ver_str[32]; int8_t drv_ver_str[32]; int8_t fw_on_flash_ver_str[32]; uint32_t funcs_supp; uint16_t max_cdblen; uint8_t asic_rev; uint8_t gen_guid[16]; uint8_t hba_port_count; uint16_t default_link_down_timeout; uint8_t iscsi_ver_min_max; uint8_t multifunc_dev; uint8_t cache_valid; uint8_t hba_status; uint8_t max_domains_supp; uint8_t phy_port; uint32_t fw_post_status; uint32_t hba_mtu[8]; uint8_t iSCSI_feat; uint8_t asic_gen; uint8_t future_u8[2]; uint32_t future_u32[3]; }; struct mgmt_cntl_attr { struct mgmt_hba_attr hba_attr; uint16_t pci_vendor_id; 
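/* PCI identity of the adapter as seen in config space */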
uint16_t pci_device_id; uint16_t pci_sub_vendor_id; uint16_t pci_sub_system_id; uint8_t pci_bus_num; uint8_t pci_dev_num; uint8_t pci_func_num; uint8_t interface_type; uint64_t unique_id; uint8_t netfilters; uint8_t rsvd0[3]; uint32_t future_u32[4]; }; struct mbx_common_get_cntl_attr { struct mbx_hdr hdr; union { struct { uint32_t rsvd0; } req; struct { struct mgmt_cntl_attr cntl_attr_info; } rsp; } params; }; /* [59] OPCODE_ADD_COMMON_IFACE_MAC */ struct mbx_add_common_iface_mac { struct mbx_hdr hdr; union { struct { uint32_t if_id; uint8_t mac_address[6]; uint8_t rsvd0[2]; } req; struct { uint32_t pmac_id; } rsp; } params; }; /* [60] OPCODE_DEL_COMMON_IFACE_MAC */ struct mbx_del_common_iface_mac { struct mbx_hdr hdr; union { struct { uint32_t if_id; uint32_t pmac_id; } req; struct { uint32_t rsvd0; } rsp; } params; }; /* [8] OPCODE_QUERY_COMMON_MAX_MBX_BUFFER_SIZE */ struct mbx_query_common_max_mbx_buffer_size { struct mbx_hdr hdr; struct { uint32_t max_ioctl_bufsz; } rsp; }; /* [61] OPCODE_COMMON_FUNCTION_RESET */ struct ioctl_common_function_reset { struct mbx_hdr hdr; }; /* [73] OPCODE_COMMON_READ_TRANSRECEIVER_DATA */ struct mbx_read_common_transrecv_data { struct mbx_hdr hdr; union { struct { uint32_t page_num; uint32_t port; } req; struct { uint32_t page_num; uint32_t port; uint32_t page_data[32]; } rsp; } params; }; /* [80] OPCODE_COMMON_FUNCTION_LINK_CONFIG */ struct mbx_common_func_link_cfg { struct mbx_hdr hdr; union { struct { uint32_t enable; } req; struct { uint32_t rsvd0; } rsp; } params; }; /* [103] OPCODE_COMMON_SET_FUNCTIONAL_CAPS */ #define CAP_SW_TIMESTAMPS 2 #define CAP_BE3_NATIVE_ERX_API 4 struct mbx_common_set_function_cap { struct mbx_hdr hdr; union { struct { uint32_t valid_capability_flags; uint32_t capability_flags; uint8_t sbz[212]; } req; struct { uint32_t valid_capability_flags; uint32_t capability_flags; uint8_t sbz[212]; } rsp; } params; }; struct mbx_lowlevel_test_loopback_mode { struct mbx_hdr hdr; union { struct { uint32_t loopback_type; uint32_t num_pkts; uint64_t pattern; uint32_t src_port; uint32_t dest_port; uint32_t pkt_size; }req; struct { uint32_t status; uint32_t num_txfer; uint32_t num_rx; uint32_t miscomp_off; uint32_t ticks_compl; }rsp; } params; }; struct mbx_lowlevel_set_loopback_mode { struct mbx_hdr hdr; union { struct { uint8_t src_port; uint8_t dest_port; uint8_t loopback_type; uint8_t loopback_state; } req; struct { uint8_t rsvd0[4]; } rsp; } params; }; #define MAX_RESC_DESC 256 #define RESC_DESC_SIZE 88 #define ACTIVE_PROFILE 2 #define NIC_RESC_DESC_TYPE_V0 0x41 #define NIC_RESC_DESC_TYPE_V1 0x51 /* OPCODE_COMMON_GET_FUNCTION_CONFIG */ struct mbx_common_get_func_config { struct mbx_hdr hdr; union { struct { uint8_t rsvd; uint8_t type; uint16_t rsvd1; } req; struct { uint32_t desc_count; uint8_t resources[MAX_RESC_DESC * RESC_DESC_SIZE]; } rsp; } params; }; /* OPCODE_COMMON_GET_PROFILE_CONFIG */ struct mbx_common_get_profile_config { struct mbx_hdr hdr; union { struct { uint8_t rsvd; uint8_t type; uint16_t rsvd1; } req; struct { uint32_t desc_count; uint8_t resources[MAX_RESC_DESC * RESC_DESC_SIZE]; } rsp; } params; }; struct oce_nic_resc_desc { uint8_t desc_type; uint8_t desc_len; uint8_t rsvd1; uint8_t flags; uint8_t vf_num; uint8_t rsvd2; uint8_t pf_num; uint8_t rsvd3; uint16_t unicast_mac_count; uint8_t rsvd4[6]; uint16_t mcc_count; uint16_t vlan_count; uint16_t mcast_mac_count; uint16_t txq_count; uint16_t rq_count; uint16_t rssq_count; uint16_t lro_count; uint16_t cq_count; uint16_t toe_conn_count; uint16_t eq_count; uint32_t 
rsvd5; uint32_t cap_flags; uint8_t link_param; uint8_t rsvd6[3]; uint32_t bw_min; uint32_t bw_max; uint8_t acpi_params; uint8_t wol_param; uint16_t rsvd7; uint32_t rsvd8[7]; }; struct flash_file_hdr { uint8_t sign[52]; uint8_t ufi_version[4]; uint32_t file_len; uint32_t cksum; uint32_t antidote; uint32_t num_imgs; uint8_t build[24]; uint8_t asic_type_rev; uint8_t rsvd[31]; }; struct image_hdr { uint32_t imageid; uint32_t imageoffset; uint32_t imagelength; uint32_t image_checksum; uint8_t image_version[32]; }; struct flash_section_hdr { uint32_t format_rev; uint32_t cksum; uint32_t antidote; uint32_t num_images; uint8_t id_string[128]; uint32_t rsvd[4]; }; struct flash_section_entry { uint32_t type; uint32_t offset; uint32_t pad_size; uint32_t image_size; uint32_t cksum; uint32_t entry_point; uint32_t rsvd0; uint32_t rsvd1; uint8_t ver_data[32]; }; struct flash_sec_info { uint8_t cookie[32]; struct flash_section_hdr fsec_hdr; struct flash_section_entry fsec_entry[32]; }; enum LOWLEVEL_SUBSYSTEM_OPCODES { /* Opcodes used for lowlevel functions common to many subsystems. * Some of these opcodes are used for diagnostic functions only. * These opcodes use the MBX_SUBSYSTEM_LOWLEVEL subsystem code. */ OPCODE_LOWLEVEL_TEST_LOOPBACK = 18, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE = 19, OPCODE_LOWLEVEL_GET_LOOPBACK_MODE = 20 }; enum LLDP_SUBSYSTEM_OPCODES { /* Opcodes used for LLDP subsystem for configuring the LLDP state machines. */ OPCODE_LLDP_GET_CFG = 1, OPCODE_LLDP_SET_CFG = 2, OPCODE_LLDP_GET_STATS = 3 }; enum DCBX_SUBSYSTEM_OPCODES { /* Opcodes used for DCBX. */ OPCODE_DCBX_GET_CFG = 1, OPCODE_DCBX_SET_CFG = 2, OPCODE_DCBX_GET_MIB_INFO = 3, OPCODE_DCBX_GET_DCBX_MODE = 4, OPCODE_DCBX_SET_MODE = 5 }; enum DMTF_SUBSYSTEM_OPCODES { /* Opcodes used for the DMTF subsystem. */ OPCODE_DMTF_EXEC_CLP_CMD = 1 }; enum DIAG_SUBSYSTEM_OPCODES { /* Opcodes used for diag functions common to many subsystems. */ OPCODE_DIAG_RUN_DMA_TEST = 1, OPCODE_DIAG_RUN_MDIO_TEST = 2, OPCODE_DIAG_RUN_NLB_TEST = 3, OPCODE_DIAG_RUN_ARM_TIMER_TEST = 4, OPCODE_DIAG_GET_MAC = 5 }; enum VENDOR_SUBSYSTEM_OPCODES { /* Opcodes used for Vendor subsystem.
*/ OPCODE_VENDOR_SLI = 1 }; /* Management Status Codes */ enum MGMT_STATUS_SUCCESS { MGMT_SUCCESS = 0, MGMT_FAILED = 1, MGMT_ILLEGAL_REQUEST = 2, MGMT_ILLEGAL_FIELD = 3, MGMT_INSUFFICIENT_BUFFER = 4, MGMT_UNAUTHORIZED_REQUEST = 5, MGMT_INVALID_ISNS_ADDRESS = 10, MGMT_INVALID_IPADDR = 11, MGMT_INVALID_GATEWAY = 12, MGMT_INVALID_SUBNETMASK = 13, MGMT_INVALID_TARGET_IPADDR = 16, MGMT_TGTTBL_FULL = 20, MGMT_FLASHROM_SAVE_FAILED = 23, MGMT_IOCTLHANDLE_ALLOC_FAILED = 27, MGMT_INVALID_SESSION = 31, MGMT_INVALID_CONNECTION = 32, MGMT_BTL_PATH_EXCEEDS_OSM_LIMIT = 33, MGMT_BTL_TGTID_EXCEEDS_OSM_LIMIT = 34, MGMT_BTL_PATH_TGTID_OCCUPIED = 35, MGMT_BTL_NO_FREE_SLOT_PATH = 36, MGMT_BTL_NO_FREE_SLOT_TGTID = 37, MGMT_POLL_IOCTL_TIMEOUT = 40, MGMT_ERROR_ACITISCSI = 41, MGMT_BUFFER_SIZE_EXCEED_OSM_OR_OS_LIMIT = 43, MGMT_REBOOT_REQUIRED = 44, MGMT_INSUFFICIENT_TIMEOUT = 45, MGMT_IPADDR_NOT_SET = 46, MGMT_IPADDR_DUP_DETECTED = 47, MGMT_CANT_REMOVE_LAST_CONNECTION = 48, MGMT_TARGET_BUSY = 49, MGMT_TGT_ERR_LISTEN_SOCKET = 50, MGMT_TGT_ERR_BIND_SOCKET = 51, MGMT_TGT_ERR_NO_SOCKET = 52, MGMT_TGT_ERR_ISNS_COMM_FAILED = 55, MGMT_CANNOT_DELETE_BOOT_TARGET = 56, MGMT_TGT_PORTAL_MODE_IN_LISTEN = 57, MGMT_FCF_IN_USE = 58 , MGMT_NO_CQE = 59, MGMT_TARGET_NOT_FOUND = 65, MGMT_NOT_SUPPORTED = 66, MGMT_NO_FCF_RECORDS = 67, MGMT_FEATURE_NOT_SUPPORTED = 68, MGMT_VPD_FUNCTION_OUT_OF_RANGE = 69, MGMT_VPD_FUNCTION_TYPE_INCORRECT = 70, MGMT_INVALID_NON_EMBEDDED_WRB = 71, MGMT_OOR = 100, MGMT_INVALID_PD = 101, MGMT_STATUS_PD_INUSE = 102, MGMT_INVALID_CQ = 103, MGMT_INVALID_QP = 104, MGMT_INVALID_STAG = 105, MGMT_ORD_EXCEEDS = 106, MGMT_IRD_EXCEEDS = 107, MGMT_SENDQ_WQE_EXCEEDS = 108, MGMT_RECVQ_RQE_EXCEEDS = 109, MGMT_SGE_SEND_EXCEEDS = 110, MGMT_SGE_WRITE_EXCEEDS = 111, MGMT_SGE_RECV_EXCEEDS = 112, MGMT_INVALID_STATE_CHANGE = 113, MGMT_MW_BOUND = 114, MGMT_INVALID_VA = 115, MGMT_INVALID_LENGTH = 116, MGMT_INVALID_FBO = 117, MGMT_INVALID_ACC_RIGHTS = 118, MGMT_INVALID_PBE_SIZE = 119, MGMT_INVALID_PBL_ENTRY = 120, MGMT_INVALID_PBL_OFFSET = 121, MGMT_ADDR_NON_EXIST = 122, MGMT_INVALID_VLANID = 123, MGMT_INVALID_MTU = 124, MGMT_INVALID_BACKLOG = 125, MGMT_CONNECTION_INPROGRESS = 126, MGMT_INVALID_RQE_SIZE = 127, MGMT_INVALID_RQE_ENTRY = 128 }; /* Additional Management Status Codes */ enum MGMT_ADDI_STATUS { MGMT_ADDI_NO_STATUS = 0, MGMT_ADDI_INVALID_IPTYPE = 1, MGMT_ADDI_TARGET_HANDLE_NOT_FOUND = 9, MGMT_ADDI_SESSION_HANDLE_NOT_FOUND = 10, MGMT_ADDI_CONNECTION_HANDLE_NOT_FOUND = 11, MGMT_ADDI_ACTIVE_SESSIONS_PRESENT = 16, MGMT_ADDI_SESSION_ALREADY_OPENED = 17, MGMT_ADDI_SESSION_ALREADY_CLOSED = 18, MGMT_ADDI_DEST_HOST_UNREACHABLE = 19, MGMT_ADDI_LOGIN_IN_PROGRESS = 20, MGMT_ADDI_TCP_CONNECT_FAILED = 21, MGMT_ADDI_INSUFFICIENT_RESOURCES = 22, MGMT_ADDI_LINK_DOWN = 23, MGMT_ADDI_DHCP_ERROR = 24, MGMT_ADDI_CONNECTION_OFFLOADED = 25, MGMT_ADDI_CONNECTION_NOT_OFFLOADED = 26, MGMT_ADDI_CONNECTION_UPLOAD_IN_PROGRESS = 27, MGMT_ADDI_REQUEST_REJECTED = 28, MGMT_ADDI_INVALID_SUBSYSTEM = 29, MGMT_ADDI_INVALID_OPCODE = 30, MGMT_ADDI_INVALID_MAXCONNECTION_PARAM = 31, MGMT_ADDI_INVALID_KEY = 32, MGMT_ADDI_INVALID_DOMAIN = 35, MGMT_ADDI_LOGIN_INITIATOR_ERROR = 43, MGMT_ADDI_LOGIN_AUTHENTICATION_ERROR = 44, MGMT_ADDI_LOGIN_AUTHORIZATION_ERROR = 45, MGMT_ADDI_LOGIN_NOT_FOUND = 46, MGMT_ADDI_LOGIN_TARGET_REMOVED = 47, MGMT_ADDI_LOGIN_UNSUPPORTED_VERSION = 48, MGMT_ADDI_LOGIN_TOO_MANY_CONNECTIONS = 49, MGMT_ADDI_LOGIN_MISSING_PARAMETER = 50, MGMT_ADDI_LOGIN_NO_SESSION_SPANNING = 51, MGMT_ADDI_LOGIN_SESSION_TYPE_NOT_SUPPORTED = 52, 
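Drivers typically fold these firmware status codes into errno values at the mailbox boundary. A minimal sketch of such a mapping follows; the helper name and the particular code-to-errno choices are illustrative, not the driver's actual routine:

	/* Hypothetical mapping from mailbox completion status to errno. */
	static int
	oce_mgmt_status_to_errno(uint32_t status)
	{
		switch (status) {
		case MGMT_SUCCESS:
			return (0);
		case MGMT_ILLEGAL_REQUEST:
		case MGMT_ILLEGAL_FIELD:
			return (EINVAL);
		case MGMT_INSUFFICIENT_BUFFER:
			return (ENOBUFS);
		case MGMT_UNAUTHORIZED_REQUEST:
			return (EPERM);
		case MGMT_NOT_SUPPORTED:
		case MGMT_FEATURE_NOT_SUPPORTED:
			return (EOPNOTSUPP);
		default:
			return (EIO);
		}
	}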
MGMT_ADDI_LOGIN_SESSION_DOES_NOT_EXIST = 53, MGMT_ADDI_LOGIN_INVALID_DURING_LOGIN = 54, MGMT_ADDI_LOGIN_TARGET_ERROR = 55, MGMT_ADDI_LOGIN_SERVICE_UNAVAILABLE = 56, MGMT_ADDI_LOGIN_OUT_OF_RESOURCES = 57, MGMT_ADDI_SAME_CHAP_SECRET = 58, MGMT_ADDI_INVALID_SECRET_LENGTH = 59, MGMT_ADDI_DUPLICATE_ENTRY = 60, MGMT_ADDI_SETTINGS_MODIFIED_REBOOT_REQD = 63, MGMT_ADDI_INVALID_EXTENDED_TIMEOUT = 64, MGMT_ADDI_INVALID_INTERFACE_HANDLE = 65, MGMT_ADDI_ERR_VLAN_ON_DEF_INTERFACE = 66, MGMT_ADDI_INTERFACE_DOES_NOT_EXIST = 67, MGMT_ADDI_INTERFACE_ALREADY_EXISTS = 68, MGMT_ADDI_INVALID_VLAN_RANGE = 69, MGMT_ADDI_ERR_SET_VLAN = 70, MGMT_ADDI_ERR_DEL_VLAN = 71, MGMT_ADDI_CANNOT_DEL_DEF_INTERFACE = 72, MGMT_ADDI_DHCP_REQ_ALREADY_PENDING = 73, MGMT_ADDI_TOO_MANY_INTERFACES = 74, MGMT_ADDI_INVALID_REQUEST = 75 }; enum NIC_SUBSYSTEM_OPCODES { /** * @brief NIC Subsystem Opcodes (see Network SLI-4 manual >= Rev4, v21-2) * These opcodes are used for configuring the Ethernet interfaces. * These opcodes all use the MBX_SUBSYSTEM_NIC subsystem code. */ NIC_CONFIG_RSS = 1, NIC_CONFIG_ACPI = 2, NIC_CONFIG_PROMISCUOUS = 3, NIC_GET_STATS = 4, NIC_CREATE_WQ = 7, NIC_CREATE_RQ = 8, NIC_DELETE_WQ = 9, NIC_DELETE_RQ = 10, NIC_CONFIG_ACPI_WOL_MAGIC = 12, NIC_GET_NETWORK_STATS = 13, NIC_CREATE_HDS_RQ = 16, NIC_DELETE_HDS_RQ = 17, NIC_GET_PPORT_STATS = 18, NIC_GET_VPORT_STATS = 19, NIC_GET_QUEUE_STATS = 20 }; /* Hash option flags for RSS enable */ enum RSS_ENABLE_FLAGS { RSS_ENABLE_NONE = 0x0, /* (No RSS) */ RSS_ENABLE_IPV4 = 0x1, /* (IPV4 HASH enabled ) */ RSS_ENABLE_TCP_IPV4 = 0x2, /* (TCP IPV4 Hash enabled) */ RSS_ENABLE_IPV6 = 0x4, /* (IPV6 HASH enabled) */ RSS_ENABLE_TCP_IPV6 = 0x8, /* (TCP IPV6 HASH */ RSS_ENABLE_UDP_IPV4 = 0x10, /* UDP IPV4 HASH */ RSS_ENABLE_UDP_IPV6 = 0x20 /* UDP IPV6 HASH */ }; #define RSS_ENABLE (RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4) #define RSS_DISABLE RSS_ENABLE_NONE /* NIC header WQE */ struct oce_nic_hdr_wqe { union { struct { #ifdef _BIG_ENDIAN /* dw0 */ uint32_t rsvd0; /* dw1 */ uint32_t last_seg_udp_len:14; uint32_t rsvd1:18; /* dw2 */ uint32_t lso_mss:14; uint32_t num_wqe:5; uint32_t rsvd4:2; uint32_t vlan:1; uint32_t lso:1; uint32_t tcpcs:1; uint32_t udpcs:1; uint32_t ipcs:1; - uint32_t rsvd3:1; - uint32_t rsvd2:1; + uint32_t mgmt:1; + uint32_t lso6:1; uint32_t forward:1; uint32_t crc:1; uint32_t event:1; uint32_t complete:1; /* dw3 */ uint32_t vlan_tag:16; uint32_t total_length:16; #else /* dw0 */ uint32_t rsvd0; /* dw1 */ uint32_t rsvd1:18; uint32_t last_seg_udp_len:14; /* dw2 */ uint32_t complete:1; uint32_t event:1; uint32_t crc:1; uint32_t forward:1; - uint32_t rsvd2:1; - uint32_t rsvd3:1; + uint32_t lso6:1; + uint32_t mgmt:1; uint32_t ipcs:1; uint32_t udpcs:1; uint32_t tcpcs:1; uint32_t lso:1; uint32_t vlan:1; uint32_t rsvd4:2; uint32_t num_wqe:5; uint32_t lso_mss:14; /* dw3 */ uint32_t total_length:16; uint32_t vlan_tag:16; #endif } s; uint32_t dw[4]; } u0; }; /* NIC fragment WQE */ struct oce_nic_frag_wqe { union { struct { /* dw0 */ uint32_t frag_pa_hi; /* dw1 */ uint32_t frag_pa_lo; /* dw2 */ uint32_t rsvd0; uint32_t frag_len; } s; uint32_t dw[4]; } u0; }; /* Ethernet Tx Completion Descriptor */ struct oce_nic_tx_cqe { union { struct { #ifdef _BIG_ENDIAN /* dw 0 */ uint32_t status:4; uint32_t rsvd0:8; uint32_t port:2; uint32_t ct:2; uint32_t wqe_index:16; /* dw 1 */ uint32_t rsvd1:5; uint32_t cast_enc:2; uint32_t lso:1; uint32_t nwh_bytes:8; uint32_t user_bytes:16; /* dw 2 */ uint32_t rsvd2; /* dw 3 */ uint32_t valid:1; uint32_t rsvd3:4; uint32_t wq_id:11; uint32_t num_pkts:16; 
#else /* dw 0 */ uint32_t wqe_index:16; uint32_t ct:2; uint32_t port:2; uint32_t rsvd0:8; uint32_t status:4; /* dw 1 */ uint32_t user_bytes:16; uint32_t nwh_bytes:8; uint32_t lso:1; uint32_t cast_enc:2; uint32_t rsvd1:5; /* dw 2 */ uint32_t rsvd2; /* dw 3 */ uint32_t num_pkts:16; uint32_t wq_id:11; uint32_t rsvd3:4; uint32_t valid:1; #endif } s; uint32_t dw[4]; } u0; }; #define WQ_CQE_VALID(_cqe) (_cqe->u0.dw[3]) #define WQ_CQE_INVALIDATE(_cqe) (_cqe->u0.dw[3] = 0) /* Receive Queue Entry (RQE) */ struct oce_nic_rqe { union { struct { uint32_t frag_pa_hi; uint32_t frag_pa_lo; } s; uint32_t dw[2]; } u0; }; /* NIC Receive CQE */ struct oce_nic_rx_cqe { union { struct { #ifdef _BIG_ENDIAN /* dw 0 */ uint32_t ip_options:1; uint32_t port:1; uint32_t pkt_size:14; uint32_t vlan_tag:16; /* dw 1 */ uint32_t num_fragments:3; uint32_t switched:1; uint32_t ct:2; uint32_t frag_index:10; uint32_t rsvd0:1; uint32_t vlan_tag_present:1; uint32_t mac_dst:6; uint32_t ip_ver:1; uint32_t l4_cksum_pass:1; uint32_t ip_cksum_pass:1; uint32_t udpframe:1; uint32_t tcpframe:1; uint32_t ipframe:1; uint32_t rss_hp:1; uint32_t error:1; /* dw 2 */ uint32_t valid:1; uint32_t hds_type:2; uint32_t lro_pkt:1; uint32_t rsvd4:1; uint32_t hds_hdr_size:12; uint32_t hds_hdr_frag_index:10; uint32_t rss_bank:1; uint32_t qnq:1; uint32_t pkt_type:2; uint32_t rss_flush:1; /* dw 3 */ uint32_t rss_hash_value; #else /* dw 0 */ uint32_t vlan_tag:16; uint32_t pkt_size:14; uint32_t port:1; uint32_t ip_options:1; /* dw 1 */ uint32_t error:1; uint32_t rss_hp:1; uint32_t ipframe:1; uint32_t tcpframe:1; uint32_t udpframe:1; uint32_t ip_cksum_pass:1; uint32_t l4_cksum_pass:1; uint32_t ip_ver:1; uint32_t mac_dst:6; uint32_t vlan_tag_present:1; uint32_t rsvd0:1; uint32_t frag_index:10; uint32_t ct:2; uint32_t switched:1; uint32_t num_fragments:3; /* dw 2 */ uint32_t rss_flush:1; uint32_t pkt_type:2; uint32_t qnq:1; uint32_t rss_bank:1; uint32_t hds_hdr_frag_index:10; uint32_t hds_hdr_size:12; uint32_t rsvd4:1; uint32_t lro_pkt:1; uint32_t hds_type:2; uint32_t valid:1; /* dw 3 */ uint32_t rss_hash_value; #endif } s; uint32_t dw[4]; } u0; }; /* NIC Receive CQE_v1 */ struct oce_nic_rx_cqe_v1 { union { struct { #ifdef _BIG_ENDIAN /* dw 0 */ uint32_t ip_options:1; uint32_t vlan_tag_present:1; uint32_t pkt_size:14; uint32_t vlan_tag:16; /* dw 1 */ uint32_t num_fragments:3; uint32_t switched:1; uint32_t ct:2; uint32_t frag_index:10; uint32_t rsvd0:1; uint32_t mac_dst:7; uint32_t ip_ver:1; uint32_t l4_cksum_pass:1; uint32_t ip_cksum_pass:1; uint32_t udpframe:1; uint32_t tcpframe:1; uint32_t ipframe:1; uint32_t rss_hp:1; uint32_t error:1; /* dw 2 */ uint32_t valid:1; uint32_t rsvd4:13; uint32_t hds_hdr_size:2; uint32_t hds_hdr_frag_index:8; uint32_t vlantag:1; uint32_t port:2; uint32_t rss_bank:1; uint32_t qnq:1; uint32_t pkt_type:2; uint32_t rss_flush:1; /* dw 3 */ uint32_t rss_hash_value; #else /* dw 0 */ uint32_t vlan_tag:16; uint32_t pkt_size:14; uint32_t vlan_tag_present:1; uint32_t ip_options:1; /* dw 1 */ uint32_t error:1; uint32_t rss_hp:1; uint32_t ipframe:1; uint32_t tcpframe:1; uint32_t udpframe:1; uint32_t ip_cksum_pass:1; uint32_t l4_cksum_pass:1; uint32_t ip_ver:1; uint32_t mac_dst:7; uint32_t rsvd0:1; uint32_t frag_index:10; uint32_t ct:2; uint32_t switched:1; uint32_t num_fragments:3; /* dw 2 */ uint32_t rss_flush:1; uint32_t pkt_type:2; uint32_t qnq:1; uint32_t rss_bank:1; uint32_t port:2; uint32_t vlantag:1; uint32_t hds_hdr_frag_index:8; uint32_t hds_hdr_size:2; uint32_t rsvd4:13; uint32_t valid:1; /* dw 3 */ uint32_t rss_hash_value;
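The WQ_CQE_VALID/WQ_CQE_INVALIDATE macros above support the usual phaseless polling pattern: the host treats the dword holding the valid bit as the marker, consumes the entry, then zeroes that dword so a stale entry is never re-processed. A minimal consumer sketch under those assumptions (the ring accessors are the ones this driver uses elsewhere; the function itself is illustrative and omits the DMA sync a real handler performs first):

	/* Sketch: drain completed TX CQEs from a completion queue. */
	static int
	wq_cqe_drain_sketch(struct oce_cq *cq)
	{
		struct oce_nic_tx_cqe *cqe;
		int n = 0;

		for (;;) {
			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
			    struct oce_nic_tx_cqe);
			if (!WQ_CQE_VALID(cqe))
				break;
			/* ... reclaim the mbuf recorded for
			 * cqe->u0.s.wqe_index here ... */
			WQ_CQE_INVALIDATE(cqe);	/* zero dw[3]: not re-seen */
			RING_GET(cq->ring, 1);
			n++;
		}
		return (n);
	}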
#endif } s; uint32_t dw[4]; } u0; }; #define RQ_CQE_VALID_MASK 0x80 #define RQ_CQE_VALID(_cqe) (_cqe->u0.dw[2]) #define RQ_CQE_INVALIDATE(_cqe) (_cqe->u0.dw[2] = 0) struct mbx_config_nic_promiscuous { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN uint16_t rsvd0; uint8_t port1_promisc; uint8_t port0_promisc; #else uint8_t port0_promisc; uint8_t port1_promisc; uint16_t rsvd0; #endif } req; struct { uint32_t rsvd0; } rsp; } params; }; typedef union oce_wq_ctx_u { uint32_t dw[17]; struct { #ifdef _BIG_ENDIAN /* dw4 */ uint32_t dw4rsvd2:8; uint32_t nic_wq_type:8; uint32_t dw4rsvd1:8; uint32_t num_pages:8; /* dw5 */ uint32_t dw5rsvd2:12; uint32_t wq_size:4; uint32_t dw5rsvd1:16; /* dw6 */ uint32_t valid:1; uint32_t dw6rsvd1:31; /* dw7 */ uint32_t dw7rsvd1:16; uint32_t cq_id:16; #else /* dw4 */ uint32_t num_pages:8; #if 0 uint32_t dw4rsvd1:8; #else /* PSP: this workaround is not documented: fill 0x01 for ulp_mask */ uint32_t ulp_mask:8; #endif uint32_t nic_wq_type:8; uint32_t dw4rsvd2:8; /* dw5 */ uint32_t dw5rsvd1:16; uint32_t wq_size:4; uint32_t dw5rsvd2:12; /* dw6 */ uint32_t dw6rsvd1:31; uint32_t valid:1; /* dw7 */ uint32_t cq_id:16; uint32_t dw7rsvd1:16; #endif /* dw8 - dw20 */ uint32_t dw8_20rsvd1[13]; } v0; struct { #ifdef _BIG_ENDIAN /* dw4 */ uint32_t dw4rsvd2:8; uint32_t nic_wq_type:8; uint32_t dw4rsvd1:8; uint32_t num_pages:8; /* dw5 */ uint32_t dw5rsvd2:12; uint32_t wq_size:4; uint32_t iface_id:16; /* dw6 */ uint32_t valid:1; uint32_t dw6rsvd1:31; /* dw7 */ uint32_t dw7rsvd1:16; uint32_t cq_id:16; #else /* dw4 */ uint32_t num_pages:8; uint32_t dw4rsvd1:8; uint32_t nic_wq_type:8; uint32_t dw4rsvd2:8; /* dw5 */ uint32_t iface_id:16; uint32_t wq_size:4; uint32_t dw5rsvd2:12; /* dw6 */ uint32_t dw6rsvd1:31; uint32_t valid:1; /* dw7 */ uint32_t cq_id:16; uint32_t dw7rsvd1:16; #endif /* dw8 - dw20 */ uint32_t dw8_20rsvd1[13]; } v1; } oce_wq_ctx_t; /** * @brief [07] NIC_CREATE_WQ * @note * Lancer requires an InterfaceID to be specified with every WQ. This * is the basis for NIC IOV where the Interface maps to a vPort and maps * to both Tx and Rx sides. 
*/ #define OCE_WQ_TYPE_FORWARDING 0x1 /* wq forwards pkts to TOE */ #define OCE_WQ_TYPE_STANDARD 0x2 /* wq sends network pkts */ struct mbx_create_nic_wq { struct mbx_hdr hdr; union { struct { uint8_t num_pages; uint8_t ulp_num; uint16_t nic_wq_type; uint16_t if_id; uint8_t wq_size; uint8_t rsvd1; uint32_t rsvd2; uint16_t cq_id; uint16_t rsvd3; uint32_t rsvd4[13]; struct phys_addr pages[8]; } req; struct { uint16_t wq_id; uint16_t rid; uint32_t db_offset; uint8_t tc_id; uint8_t rsvd0[3]; } rsp; } params; }; /* [09] NIC_DELETE_WQ */ struct mbx_delete_nic_wq { /* dw0 - dw3 */ struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN /* dw4 */ uint16_t rsvd0; uint16_t wq_id; #else /* dw4 */ uint16_t wq_id; uint16_t rsvd0; #endif } req; struct { uint32_t rsvd0; } rsp; } params; }; struct mbx_create_nic_rq { struct mbx_hdr hdr; union { struct { uint16_t cq_id; uint8_t frag_size; uint8_t num_pages; struct phys_addr pages[2]; uint32_t if_id; uint16_t max_frame_size; uint16_t page_size; uint32_t is_rss_queue; } req; struct { uint16_t rq_id; uint8_t rss_cpuid; uint8_t rsvd0; } rsp; } params; }; /* [10] NIC_DELETE_RQ */ struct mbx_delete_nic_rq { /* dw0 - dw3 */ struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN /* dw4 */ uint16_t bypass_flush; uint16_t rq_id; #else /* dw4 */ uint16_t rq_id; uint16_t bypass_flush; #endif } req; struct { /* dw4 */ uint32_t rsvd0; } rsp; } params; }; struct oce_port_rxf_stats_v0 { uint32_t rx_bytes_lsd; /* dword 0*/ uint32_t rx_bytes_msd; /* dword 1*/ uint32_t rx_total_frames; /* dword 2*/ uint32_t rx_unicast_frames; /* dword 3*/ uint32_t rx_multicast_frames; /* dword 4*/ uint32_t rx_broadcast_frames; /* dword 5*/ uint32_t rx_crc_errors; /* dword 6*/ uint32_t rx_alignment_symbol_errors; /* dword 7*/ uint32_t rx_pause_frames; /* dword 8*/ uint32_t rx_control_frames; /* dword 9*/ uint32_t rx_in_range_errors; /* dword 10*/ uint32_t rx_out_range_errors; /* dword 11*/ uint32_t rx_frame_too_long; /* dword 12*/ uint32_t rx_address_match_errors; /* dword 13*/ uint32_t rx_vlan_mismatch; /* dword 14*/ uint32_t rx_dropped_too_small; /* dword 15*/ uint32_t rx_dropped_too_short; /* dword 16*/ uint32_t rx_dropped_header_too_small; /* dword 17*/ uint32_t rx_dropped_tcp_length; /* dword 18*/ uint32_t rx_dropped_runt; /* dword 19*/ uint32_t rx_64_byte_packets; /* dword 20*/ uint32_t rx_65_127_byte_packets; /* dword 21*/ uint32_t rx_128_256_byte_packets; /* dword 22*/ uint32_t rx_256_511_byte_packets; /* dword 23*/ uint32_t rx_512_1023_byte_packets; /* dword 24*/ uint32_t rx_1024_1518_byte_packets; /* dword 25*/ uint32_t rx_1519_2047_byte_packets; /* dword 26*/ uint32_t rx_2048_4095_byte_packets; /* dword 27*/ uint32_t rx_4096_8191_byte_packets; /* dword 28*/ uint32_t rx_8192_9216_byte_packets; /* dword 29*/ uint32_t rx_ip_checksum_errs; /* dword 30*/ uint32_t rx_tcp_checksum_errs; /* dword 31*/ uint32_t rx_udp_checksum_errs; /* dword 32*/ uint32_t rx_non_rss_packets; /* dword 33*/ uint32_t rx_ipv4_packets; /* dword 34*/ uint32_t rx_ipv6_packets; /* dword 35*/ uint32_t rx_ipv4_bytes_lsd; /* dword 36*/ uint32_t rx_ipv4_bytes_msd; /* dword 37*/ uint32_t rx_ipv6_bytes_lsd; /* dword 38*/ uint32_t rx_ipv6_bytes_msd; /* dword 39*/ uint32_t rx_chute1_packets; /* dword 40*/ uint32_t rx_chute2_packets; /* dword 41*/ uint32_t rx_chute3_packets; /* dword 42*/ uint32_t rx_management_packets; /* dword 43*/ uint32_t rx_switched_unicast_packets; /* dword 44*/ uint32_t rx_switched_multicast_packets; /* dword 45*/ uint32_t rx_switched_broadcast_packets; /* dword 46*/ uint32_t tx_bytes_lsd; /* dword 
47*/ uint32_t tx_bytes_msd; /* dword 48*/ uint32_t tx_unicastframes; /* dword 49*/ uint32_t tx_multicastframes; /* dword 50*/ uint32_t tx_broadcastframes; /* dword 51*/ uint32_t tx_pauseframes; /* dword 52*/ uint32_t tx_controlframes; /* dword 53*/ uint32_t tx_64_byte_packets; /* dword 54*/ uint32_t tx_65_127_byte_packets; /* dword 55*/ uint32_t tx_128_256_byte_packets; /* dword 56*/ uint32_t tx_256_511_byte_packets; /* dword 57*/ uint32_t tx_512_1023_byte_packets; /* dword 58*/ uint32_t tx_1024_1518_byte_packets; /* dword 59*/ uint32_t tx_1519_2047_byte_packets; /* dword 60*/ uint32_t tx_2048_4095_byte_packets; /* dword 61*/ uint32_t tx_4096_8191_byte_packets; /* dword 62*/ uint32_t tx_8192_9216_byte_packets; /* dword 63*/ uint32_t rxpp_fifo_overflow_drop; /* dword 64*/ uint32_t rx_input_fifo_overflow_drop; /* dword 65*/ }; struct oce_rxf_stats_v0 { struct oce_port_rxf_stats_v0 port[2]; uint32_t rx_drops_no_pbuf; /* dword 132*/ uint32_t rx_drops_no_txpb; /* dword 133*/ uint32_t rx_drops_no_erx_descr; /* dword 134*/ uint32_t rx_drops_no_tpre_descr; /* dword 135*/ uint32_t management_rx_port_packets; /* dword 136*/ uint32_t management_rx_port_bytes; /* dword 137*/ uint32_t management_rx_port_pause_frames;/* dword 138*/ uint32_t management_rx_port_errors; /* dword 139*/ uint32_t management_tx_port_packets; /* dword 140*/ uint32_t management_tx_port_bytes; /* dword 141*/ uint32_t management_tx_port_pause; /* dword 142*/ uint32_t management_rx_port_rxfifo_overflow; /* dword 143*/ uint32_t rx_drops_too_many_frags; /* dword 144*/ uint32_t rx_drops_invalid_ring; /* dword 145*/ uint32_t forwarded_packets; /* dword 146*/ uint32_t rx_drops_mtu; /* dword 147*/ uint32_t rsvd0[7]; uint32_t port0_jabber_events; uint32_t port1_jabber_events; uint32_t rsvd1[6]; }; +struct oce_port_rxf_stats_v2 { + uint32_t rsvd0[10]; + uint32_t roce_bytes_received_lsd; + uint32_t roce_bytes_received_msd; + uint32_t rsvd1[5]; + uint32_t roce_frames_received; + uint32_t rx_crc_errors; + uint32_t rx_alignment_symbol_errors; + uint32_t rx_pause_frames; + uint32_t rx_priority_pause_frames; + uint32_t rx_control_frames; + uint32_t rx_in_range_errors; + uint32_t rx_out_range_errors; + uint32_t rx_frame_too_long; + uint32_t rx_address_match_errors; + uint32_t rx_dropped_too_small; + uint32_t rx_dropped_too_short; + uint32_t rx_dropped_header_too_small; + uint32_t rx_dropped_tcp_length; + uint32_t rx_dropped_runt; + uint32_t rsvd2[10]; + uint32_t rx_ip_checksum_errs; + uint32_t rx_tcp_checksum_errs; + uint32_t rx_udp_checksum_errs; + uint32_t rsvd3[7]; + uint32_t rx_switched_unicast_packets; + uint32_t rx_switched_multicast_packets; + uint32_t rx_switched_broadcast_packets; + uint32_t rsvd4[3]; + uint32_t tx_pauseframes; + uint32_t tx_priority_pauseframes; + uint32_t tx_controlframes; + uint32_t rsvd5[10]; + uint32_t rxpp_fifo_overflow_drop; + uint32_t rx_input_fifo_overflow_drop; + uint32_t pmem_fifo_overflow_drop; + uint32_t jabber_events; + uint32_t rsvd6[3]; + uint32_t rx_drops_payload_size; + uint32_t rx_drops_clipped_header; + uint32_t rx_drops_crc; + uint32_t roce_drops_payload_len; + uint32_t roce_drops_crc; + uint32_t rsvd7[19]; +}; + + struct oce_port_rxf_stats_v1 { uint32_t rsvd0[12]; uint32_t rx_crc_errors; uint32_t rx_alignment_symbol_errors; uint32_t rx_pause_frames; uint32_t rx_priority_pause_frames; uint32_t rx_control_frames; uint32_t rx_in_range_errors; uint32_t rx_out_range_errors; uint32_t rx_frame_too_long; uint32_t rx_address_match_errors; uint32_t rx_dropped_too_small; uint32_t rx_dropped_too_short; uint32_t 
rx_dropped_header_too_small; uint32_t rx_dropped_tcp_length; uint32_t rx_dropped_runt; uint32_t rsvd1[10]; uint32_t rx_ip_checksum_errs; uint32_t rx_tcp_checksum_errs; uint32_t rx_udp_checksum_errs; uint32_t rsvd2[7]; uint32_t rx_switched_unicast_packets; uint32_t rx_switched_multicast_packets; uint32_t rx_switched_broadcast_packets; uint32_t rsvd3[3]; uint32_t tx_pauseframes; uint32_t tx_priority_pauseframes; uint32_t tx_controlframes; uint32_t rsvd4[10]; uint32_t rxpp_fifo_overflow_drop; uint32_t rx_input_fifo_overflow_drop; uint32_t pmem_fifo_overflow_drop; uint32_t jabber_events; uint32_t rsvd5[3]; }; +struct oce_rxf_stats_v2 { + struct oce_port_rxf_stats_v2 port[4]; + uint32_t rsvd0[2]; + uint32_t rx_drops_no_pbuf; + uint32_t rx_drops_no_txpb; + uint32_t rx_drops_no_erx_descr; + uint32_t rx_drops_no_tpre_descr; + uint32_t rsvd1[6]; + uint32_t rx_drops_too_many_frags; + uint32_t rx_drops_invalid_ring; + uint32_t forwarded_packets; + uint32_t rx_drops_mtu; + uint32_t rsvd2[35]; +}; struct oce_rxf_stats_v1 { struct oce_port_rxf_stats_v1 port[4]; uint32_t rsvd0[2]; uint32_t rx_drops_no_pbuf; uint32_t rx_drops_no_txpb; uint32_t rx_drops_no_erx_descr; uint32_t rx_drops_no_tpre_descr; uint32_t rsvd1[6]; uint32_t rx_drops_too_many_frags; uint32_t rx_drops_invalid_ring; uint32_t forwarded_packets; uint32_t rx_drops_mtu; uint32_t rsvd2[14]; }; +struct oce_erx_stats_v2 { + uint32_t rx_drops_no_fragments[136]; + uint32_t rsvd[3]; +}; + struct oce_erx_stats_v1 { uint32_t rx_drops_no_fragments[68]; uint32_t rsvd[4]; }; struct oce_erx_stats_v0 { uint32_t rx_drops_no_fragments[44]; uint32_t rsvd[4]; }; struct oce_pmem_stats { uint32_t eth_red_drops; uint32_t rsvd[5]; }; +struct oce_hw_stats_v2 { + struct oce_rxf_stats_v2 rxf; + uint32_t rsvd0[OCE_TXP_SW_SZ]; + struct oce_erx_stats_v2 erx; + struct oce_pmem_stats pmem; + uint32_t rsvd1[18]; +}; + + struct oce_hw_stats_v1 { struct oce_rxf_stats_v1 rxf; uint32_t rsvd0[OCE_TXP_SW_SZ]; struct oce_erx_stats_v1 erx; struct oce_pmem_stats pmem; uint32_t rsvd1[18]; }; struct oce_hw_stats_v0 { struct oce_rxf_stats_v0 rxf; uint32_t rsvd[48]; struct oce_erx_stats_v0 erx; struct oce_pmem_stats pmem; }; -struct mbx_get_nic_stats_v0 { - struct mbx_hdr hdr; - union { - struct { - uint32_t rsvd0; - } req; +#define MBX_GET_NIC_STATS(version) \ + struct mbx_get_nic_stats_v##version { \ + struct mbx_hdr hdr; \ + union { \ + struct { \ + uint32_t rsvd0; \ + } req; \ + union { \ + struct oce_hw_stats_v##version stats; \ + } rsp; \ + } params; \ +} - union { - struct oce_hw_stats_v0 stats; - } rsp; - } params; -}; +MBX_GET_NIC_STATS(0); +MBX_GET_NIC_STATS(1); +MBX_GET_NIC_STATS(2); -struct mbx_get_nic_stats { - struct mbx_hdr hdr; - union { - struct { - uint32_t rsvd0; - } req; - - struct { - struct oce_hw_stats_v1 stats; - } rsp; - } params; -}; - - /* [18(0x12)] NIC_GET_PPORT_STATS */ struct pport_stats { uint64_t tx_pkts; uint64_t tx_unicast_pkts; uint64_t tx_multicast_pkts; uint64_t tx_broadcast_pkts; uint64_t tx_bytes; uint64_t tx_unicast_bytes; uint64_t tx_multicast_bytes; uint64_t tx_broadcast_bytes; uint64_t tx_discards; uint64_t tx_errors; uint64_t tx_pause_frames; uint64_t tx_pause_on_frames; uint64_t tx_pause_off_frames; uint64_t tx_internal_mac_errors; uint64_t tx_control_frames; uint64_t tx_pkts_64_bytes; uint64_t tx_pkts_65_to_127_bytes; uint64_t tx_pkts_128_to_255_bytes; uint64_t tx_pkts_256_to_511_bytes; uint64_t tx_pkts_512_to_1023_bytes; uint64_t tx_pkts_1024_to_1518_bytes; uint64_t tx_pkts_1519_to_2047_bytes; uint64_t tx_pkts_2048_to_4095_bytes; uint64_t 
tx_pkts_4096_to_8191_bytes; uint64_t tx_pkts_8192_to_9216_bytes; uint64_t tx_lso_pkts; uint64_t rx_pkts; uint64_t rx_unicast_pkts; uint64_t rx_multicast_pkts; uint64_t rx_broadcast_pkts; uint64_t rx_bytes; uint64_t rx_unicast_bytes; uint64_t rx_multicast_bytes; uint64_t rx_broadcast_bytes; uint32_t rx_unknown_protos; uint32_t reserved_word69; uint64_t rx_discards; uint64_t rx_errors; uint64_t rx_crc_errors; uint64_t rx_alignment_errors; uint64_t rx_symbol_errors; uint64_t rx_pause_frames; uint64_t rx_pause_on_frames; uint64_t rx_pause_off_frames; uint64_t rx_frames_too_long; uint64_t rx_internal_mac_errors; uint32_t rx_undersize_pkts; uint32_t rx_oversize_pkts; uint32_t rx_fragment_pkts; uint32_t rx_jabbers; uint64_t rx_control_frames; uint64_t rx_control_frames_unknown_opcode; uint32_t rx_in_range_errors; uint32_t rx_out_of_range_errors; uint32_t rx_address_match_errors; uint32_t rx_vlan_mismatch_errors; uint32_t rx_dropped_too_small; uint32_t rx_dropped_too_short; uint32_t rx_dropped_header_too_small; uint32_t rx_dropped_invalid_tcp_length; uint32_t rx_dropped_runt; uint32_t rx_ip_checksum_errors; uint32_t rx_tcp_checksum_errors; uint32_t rx_udp_checksum_errors; uint32_t rx_non_rss_pkts; uint64_t reserved_word111; uint64_t rx_ipv4_pkts; uint64_t rx_ipv6_pkts; uint64_t rx_ipv4_bytes; uint64_t rx_ipv6_bytes; uint64_t rx_nic_pkts; uint64_t rx_tcp_pkts; uint64_t rx_iscsi_pkts; uint64_t rx_management_pkts; uint64_t rx_switched_unicast_pkts; uint64_t rx_switched_multicast_pkts; uint64_t rx_switched_broadcast_pkts; uint64_t num_forwards; uint32_t rx_fifo_overflow; uint32_t rx_input_fifo_overflow; uint64_t rx_drops_too_many_frags; uint32_t rx_drops_invalid_queue; uint32_t reserved_word141; uint64_t rx_drops_mtu; uint64_t rx_pkts_64_bytes; uint64_t rx_pkts_65_to_127_bytes; uint64_t rx_pkts_128_to_255_bytes; uint64_t rx_pkts_256_to_511_bytes; uint64_t rx_pkts_512_to_1023_bytes; uint64_t rx_pkts_1024_to_1518_bytes; uint64_t rx_pkts_1519_to_2047_bytes; uint64_t rx_pkts_2048_to_4095_bytes; uint64_t rx_pkts_4096_to_8191_bytes; uint64_t rx_pkts_8192_to_9216_bytes; }; struct mbx_get_pport_stats { /* dw0 - dw3 */ struct mbx_hdr hdr; union { struct { /* dw4 */ #ifdef _BIG_ENDIAN uint32_t reset_stats:8; uint32_t rsvd0:8; uint32_t port_number:16; #else uint32_t port_number:16; uint32_t rsvd0:8; uint32_t reset_stats:8; #endif } req; union { struct pport_stats pps; uint32_t pport_stats[164 - 4 + 1]; } rsp; } params; }; /* [19(0x13)] NIC_GET_VPORT_STATS */ struct vport_stats { uint64_t tx_pkts; uint64_t tx_unicast_pkts; uint64_t tx_multicast_pkts; uint64_t tx_broadcast_pkts; uint64_t tx_bytes; uint64_t tx_unicast_bytes; uint64_t tx_multicast_bytes; uint64_t tx_broadcast_bytes; uint64_t tx_discards; uint64_t tx_errors; uint64_t tx_pkts_64_bytes; uint64_t tx_pkts_65_to_127_bytes; uint64_t tx_pkts_128_to_255_bytes; uint64_t tx_pkts_256_to_511_bytes; uint64_t tx_pkts_512_to_1023_bytes; uint64_t tx_pkts_1024_to_1518_bytes; uint64_t tx_pkts_1519_to_9699_bytes; uint64_t tx_pkts_over_9699_bytes; uint64_t rx_pkts; uint64_t rx_unicast_pkts; uint64_t rx_multicast_pkts; uint64_t rx_broadcast_pkts; uint64_t rx_bytes; uint64_t rx_unicast_bytes; uint64_t rx_multicast_bytes; uint64_t rx_broadcast_bytes; uint64_t rx_discards; uint64_t rx_errors; uint64_t rx_pkts_64_bytes; uint64_t rx_pkts_65_to_127_bytes; uint64_t rx_pkts_128_to_255_bytes; uint64_t rx_pkts_256_to_511_bytes; uint64_t rx_pkts_512_to_1023_bytes; uint64_t rx_pkts_1024_to_1518_bytes; uint64_t rx_pkts_1519_to_9699_bytes; uint64_t rx_pkts_gt_9699_bytes; }; struct 
mbx_get_vport_stats { /* dw0 - dw3 */ struct mbx_hdr hdr; union { struct { /* dw4 */ #ifdef _BIG_ENDIAN uint32_t reset_stats:8; uint32_t rsvd0:8; uint32_t vport_number:16; #else uint32_t vport_number:16; uint32_t rsvd0:8; uint32_t reset_stats:8; #endif } req; union { struct vport_stats vps; uint32_t vport_stats[75 - 4 + 1]; } rsp; } params; }; /** * @brief [20(0x14)] NIC_GET_QUEUE_STATS * The significant difference between vPort and Queue statistics is * the packet byte counters. */ struct queue_stats { uint64_t packets; uint64_t bytes; uint64_t errors; uint64_t drops; uint64_t buffer_errors; /* rsvd when tx */ }; #define QUEUE_TYPE_WQ 0 #define QUEUE_TYPE_RQ 1 #define QUEUE_TYPE_HDS_RQ 1 /* same as RQ */ struct mbx_get_queue_stats { /* dw0 - dw3 */ struct mbx_hdr hdr; union { struct { /* dw4 */ #ifdef _BIG_ENDIAN uint32_t reset_stats:8; uint32_t queue_type:8; uint32_t queue_id:16; #else uint32_t queue_id:16; uint32_t queue_type:8; uint32_t reset_stats:8; #endif } req; union { struct queue_stats qs; uint32_t queue_stats[13 - 4 + 1]; } rsp; } params; }; /* [01] NIC_CONFIG_RSS */ #define OCE_HASH_TBL_SZ 10 #define OCE_CPU_TBL_SZ 128 #define OCE_FLUSH 1 /* RSS flush completion per CQ port */ struct mbx_config_nic_rss { struct mbx_hdr hdr; union { struct { #ifdef _BIG_ENDIAN uint32_t if_id; uint16_t cpu_tbl_sz_log2; uint16_t enable_rss; uint32_t hash[OCE_HASH_TBL_SZ]; uint8_t cputable[OCE_CPU_TBL_SZ]; uint8_t rsvd[3]; uint8_t flush; #else uint32_t if_id; uint16_t enable_rss; uint16_t cpu_tbl_sz_log2; uint32_t hash[OCE_HASH_TBL_SZ]; uint8_t cputable[OCE_CPU_TBL_SZ]; uint8_t flush; uint8_t rsvd[3]; #endif } req; struct { uint8_t rsvd[3]; uint8_t rss_bank; } rsp; } params; }; #pragma pack() typedef uint32_t oce_stat_t; /* statistic counter */ enum OCE_RXF_PORT_STATS { RXF_RX_BYTES_LSD, RXF_RX_BYTES_MSD, RXF_RX_TOTAL_FRAMES, RXF_RX_UNICAST_FRAMES, RXF_RX_MULTICAST_FRAMES, RXF_RX_BROADCAST_FRAMES, RXF_RX_CRC_ERRORS, RXF_RX_ALIGNMENT_SYMBOL_ERRORS, RXF_RX_PAUSE_FRAMES, RXF_RX_CONTROL_FRAMES, RXF_RX_IN_RANGE_ERRORS, RXF_RX_OUT_RANGE_ERRORS, RXF_RX_FRAME_TOO_LONG, RXF_RX_ADDRESS_MATCH_ERRORS, RXF_RX_VLAN_MISMATCH, RXF_RX_DROPPED_TOO_SMALL, RXF_RX_DROPPED_TOO_SHORT, RXF_RX_DROPPED_HEADER_TOO_SMALL, RXF_RX_DROPPED_TCP_LENGTH, RXF_RX_DROPPED_RUNT, RXF_RX_64_BYTE_PACKETS, RXF_RX_65_127_BYTE_PACKETS, RXF_RX_128_256_BYTE_PACKETS, RXF_RX_256_511_BYTE_PACKETS, RXF_RX_512_1023_BYTE_PACKETS, RXF_RX_1024_1518_BYTE_PACKETS, RXF_RX_1519_2047_BYTE_PACKETS, RXF_RX_2048_4095_BYTE_PACKETS, RXF_RX_4096_8191_BYTE_PACKETS, RXF_RX_8192_9216_BYTE_PACKETS, RXF_RX_IP_CHECKSUM_ERRS, RXF_RX_TCP_CHECKSUM_ERRS, RXF_RX_UDP_CHECKSUM_ERRS, RXF_RX_NON_RSS_PACKETS, RXF_RX_IPV4_PACKETS, RXF_RX_IPV6_PACKETS, RXF_RX_IPV4_BYTES_LSD, RXF_RX_IPV4_BYTES_MSD, RXF_RX_IPV6_BYTES_LSD, RXF_RX_IPV6_BYTES_MSD, RXF_RX_CHUTE1_PACKETS, RXF_RX_CHUTE2_PACKETS, RXF_RX_CHUTE3_PACKETS, RXF_RX_MANAGEMENT_PACKETS, RXF_RX_SWITCHED_UNICAST_PACKETS, RXF_RX_SWITCHED_MULTICAST_PACKETS, RXF_RX_SWITCHED_BROADCAST_PACKETS, RXF_TX_BYTES_LSD, RXF_TX_BYTES_MSD, RXF_TX_UNICAST_FRAMES, RXF_TX_MULTICAST_FRAMES, RXF_TX_BROADCAST_FRAMES, RXF_TX_PAUSE_FRAMES, RXF_TX_CONTROL_FRAMES, RXF_TX_64_BYTE_PACKETS, RXF_TX_65_127_BYTE_PACKETS, RXF_TX_128_256_BYTE_PACKETS, RXF_TX_256_511_BYTE_PACKETS, RXF_TX_512_1023_BYTE_PACKETS, RXF_TX_1024_1518_BYTE_PACKETS, RXF_TX_1519_2047_BYTE_PACKETS, RXF_TX_2048_4095_BYTE_PACKETS, RXF_TX_4096_8191_BYTE_PACKETS, RXF_TX_8192_9216_BYTE_PACKETS, RXF_RX_FIFO_OVERFLOW, RXF_RX_INPUT_FIFO_OVERFLOW, RXF_PORT_STATS_N_WORDS }; enum OCE_RXF_ADDL_STATS { 
RXF_RX_DROPS_NO_PBUF, RXF_RX_DROPS_NO_TXPB, RXF_RX_DROPS_NO_ERX_DESCR, RXF_RX_DROPS_NO_TPRE_DESCR, RXF_MANAGEMENT_RX_PORT_PACKETS, RXF_MANAGEMENT_RX_PORT_BYTES, RXF_MANAGEMENT_RX_PORT_PAUSE_FRAMES, RXF_MANAGEMENT_RX_PORT_ERRORS, RXF_MANAGEMENT_TX_PORT_PACKETS, RXF_MANAGEMENT_TX_PORT_BYTES, RXF_MANAGEMENT_TX_PORT_PAUSE, RXF_MANAGEMENT_RX_PORT_RXFIFO_OVERFLOW, RXF_RX_DROPS_TOO_MANY_FRAGS, RXF_RX_DROPS_INVALID_RING, RXF_FORWARDED_PACKETS, RXF_RX_DROPS_MTU, RXF_ADDL_STATS_N_WORDS }; enum OCE_TX_CHUTE_PORT_STATS { CTPT_XMT_IPV4_PKTS, CTPT_XMT_IPV4_LSD, CTPT_XMT_IPV4_MSD, CTPT_XMT_IPV6_PKTS, CTPT_XMT_IPV6_LSD, CTPT_XMT_IPV6_MSD, CTPT_REXMT_IPV4_PKTs, CTPT_REXMT_IPV4_LSD, CTPT_REXMT_IPV4_MSD, CTPT_REXMT_IPV6_PKTs, CTPT_REXMT_IPV6_LSD, CTPT_REXMT_IPV6_MSD, CTPT_N_WORDS, }; enum OCE_RX_ERR_STATS { RX_DROPS_NO_FRAGMENTS_0, RX_DROPS_NO_FRAGMENTS_1, RX_DROPS_NO_FRAGMENTS_2, RX_DROPS_NO_FRAGMENTS_3, RX_DROPS_NO_FRAGMENTS_4, RX_DROPS_NO_FRAGMENTS_5, RX_DROPS_NO_FRAGMENTS_6, RX_DROPS_NO_FRAGMENTS_7, RX_DROPS_NO_FRAGMENTS_8, RX_DROPS_NO_FRAGMENTS_9, RX_DROPS_NO_FRAGMENTS_10, RX_DROPS_NO_FRAGMENTS_11, RX_DROPS_NO_FRAGMENTS_12, RX_DROPS_NO_FRAGMENTS_13, RX_DROPS_NO_FRAGMENTS_14, RX_DROPS_NO_FRAGMENTS_15, RX_DROPS_NO_FRAGMENTS_16, RX_DROPS_NO_FRAGMENTS_17, RX_DROPS_NO_FRAGMENTS_18, RX_DROPS_NO_FRAGMENTS_19, RX_DROPS_NO_FRAGMENTS_20, RX_DROPS_NO_FRAGMENTS_21, RX_DROPS_NO_FRAGMENTS_22, RX_DROPS_NO_FRAGMENTS_23, RX_DROPS_NO_FRAGMENTS_24, RX_DROPS_NO_FRAGMENTS_25, RX_DROPS_NO_FRAGMENTS_26, RX_DROPS_NO_FRAGMENTS_27, RX_DROPS_NO_FRAGMENTS_28, RX_DROPS_NO_FRAGMENTS_29, RX_DROPS_NO_FRAGMENTS_30, RX_DROPS_NO_FRAGMENTS_31, RX_DROPS_NO_FRAGMENTS_32, RX_DROPS_NO_FRAGMENTS_33, RX_DROPS_NO_FRAGMENTS_34, RX_DROPS_NO_FRAGMENTS_35, RX_DROPS_NO_FRAGMENTS_36, RX_DROPS_NO_FRAGMENTS_37, RX_DROPS_NO_FRAGMENTS_38, RX_DROPS_NO_FRAGMENTS_39, RX_DROPS_NO_FRAGMENTS_40, RX_DROPS_NO_FRAGMENTS_41, RX_DROPS_NO_FRAGMENTS_42, RX_DROPS_NO_FRAGMENTS_43, RX_DEBUG_WDMA_SENT_HOLD, RX_DEBUG_WDMA_PBFREE_SENT_HOLD, RX_DEBUG_WDMA_0B_PBFREE_SENT_HOLD, RX_DEBUG_PMEM_PBUF_DEALLOC, RX_ERRORS_N_WORDS }; enum OCE_PMEM_ERR_STATS { PMEM_ETH_RED_DROPS, PMEM_LRO_RED_DROPS, PMEM_ULP0_RED_DROPS, PMEM_ULP1_RED_DROPS, PMEM_GLOBAL_RED_DROPS, PMEM_ERRORS_N_WORDS }; /** * @brief Statistics for a given Physical Port * These satisfy all the required BE2 statistics and also the * following MIB objects: * * RFC 2863 - The Interfaces Group MIB * RFC 2819 - Remote Network Monitoring Management Information Base (RMON) * RFC 3635 - Managed Objects for the Ethernet-like Interface Types * RFC 4502 - Remote Network Monitoring Mgmt Information Base Ver-2 (RMON2) * */ enum OCE_PPORT_STATS { PPORT_TX_PKTS = 0, PPORT_TX_UNICAST_PKTS = 2, PPORT_TX_MULTICAST_PKTS = 4, PPORT_TX_BROADCAST_PKTS = 6, PPORT_TX_BYTES = 8, PPORT_TX_UNICAST_BYTES = 10, PPORT_TX_MULTICAST_BYTES = 12, PPORT_TX_BROADCAST_BYTES = 14, PPORT_TX_DISCARDS = 16, PPORT_TX_ERRORS = 18, PPORT_TX_PAUSE_FRAMES = 20, PPORT_TX_PAUSE_ON_FRAMES = 22, PPORT_TX_PAUSE_OFF_FRAMES = 24, PPORT_TX_INTERNAL_MAC_ERRORS = 26, PPORT_TX_CONTROL_FRAMES = 28, PPORT_TX_PKTS_64_BYTES = 30, PPORT_TX_PKTS_65_TO_127_BYTES = 32, PPORT_TX_PKTS_128_TO_255_BYTES = 34, PPORT_TX_PKTS_256_TO_511_BYTES = 36, PPORT_TX_PKTS_512_TO_1023_BYTES = 38, PPORT_TX_PKTS_1024_TO_1518_BYTES = 40, PPORT_TX_PKTS_1519_TO_2047_BYTES = 42, PPORT_TX_PKTS_2048_TO_4095_BYTES = 44, PPORT_TX_PKTS_4096_TO_8191_BYTES = 46, PPORT_TX_PKTS_8192_TO_9216_BYTES = 48, PPORT_TX_LSO_PKTS = 50, PPORT_RX_PKTS = 52, PPORT_RX_UNICAST_PKTS = 54, PPORT_RX_MULTICAST_PKTS = 56, 
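The numeric values in this enum are dword offsets into the response buffer, which is why 64-bit counters advance the index by two while 32-bit counters advance it by one. A sketch of pulling one 64-bit counter out of the raw word view of struct mbx_get_pport_stats, assuming the low word comes first within each counter (that ordering is an assumption, not stated by this header):

	/* Sketch: read a 64-bit pport counter from the raw dword array. */
	static uint64_t
	pport_stat_u64(const uint32_t *words, int offset)
	{
		return (((uint64_t)words[offset + 1] << 32) | words[offset]);
	}

	/* e.g. pport_stat_u64(cmd.params.rsp.pport_stats, PPORT_RX_PKTS) */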
PPORT_RX_BROADCAST_PKTS = 58, PPORT_RX_BYTES = 60, PPORT_RX_UNICAST_BYTES = 62, PPORT_RX_MULTICAST_BYTES = 64, PPORT_RX_BROADCAST_BYTES = 66, PPORT_RX_UNKNOWN_PROTOS = 68, PPORT_RESERVED_WORD69 = 69, PPORT_RX_DISCARDS = 70, PPORT_RX_ERRORS = 72, PPORT_RX_CRC_ERRORS = 74, PPORT_RX_ALIGNMENT_ERRORS = 76, PPORT_RX_SYMBOL_ERRORS = 78, PPORT_RX_PAUSE_FRAMES = 80, PPORT_RX_PAUSE_ON_FRAMES = 82, PPORT_RX_PAUSE_OFF_FRAMES = 84, PPORT_RX_FRAMES_TOO_LONG = 86, PPORT_RX_INTERNAL_MAC_ERRORS = 88, PPORT_RX_UNDERSIZE_PKTS = 90, PPORT_RX_OVERSIZE_PKTS = 91, PPORT_RX_FRAGMENT_PKTS = 92, PPORT_RX_JABBERS = 93, PPORT_RX_CONTROL_FRAMES = 94, PPORT_RX_CONTROL_FRAMES_UNK_OPCODE = 96, PPORT_RX_IN_RANGE_ERRORS = 98, PPORT_RX_OUT_OF_RANGE_ERRORS = 99, PPORT_RX_ADDRESS_MATCH_ERRORS = 100, PPORT_RX_VLAN_MISMATCH_ERRORS = 101, PPORT_RX_DROPPED_TOO_SMALL = 102, PPORT_RX_DROPPED_TOO_SHORT = 103, PPORT_RX_DROPPED_HEADER_TOO_SMALL = 104, PPORT_RX_DROPPED_INVALID_TCP_LENGTH = 105, PPORT_RX_DROPPED_RUNT = 106, PPORT_RX_IP_CHECKSUM_ERRORS = 107, PPORT_RX_TCP_CHECKSUM_ERRORS = 108, PPORT_RX_UDP_CHECKSUM_ERRORS = 109, PPORT_RX_NON_RSS_PKTS = 110, PPORT_RESERVED_WORD111 = 111, PPORT_RX_IPV4_PKTS = 112, PPORT_RX_IPV6_PKTS = 114, PPORT_RX_IPV4_BYTES = 116, PPORT_RX_IPV6_BYTES = 118, PPORT_RX_NIC_PKTS = 120, PPORT_RX_TCP_PKTS = 122, PPORT_RX_ISCSI_PKTS = 124, PPORT_RX_MANAGEMENT_PKTS = 126, PPORT_RX_SWITCHED_UNICAST_PKTS = 128, PPORT_RX_SWITCHED_MULTICAST_PKTS = 130, PPORT_RX_SWITCHED_BROADCAST_PKTS = 132, PPORT_NUM_FORWARDS = 134, PPORT_RX_FIFO_OVERFLOW = 136, PPORT_RX_INPUT_FIFO_OVERFLOW = 137, PPORT_RX_DROPS_TOO_MANY_FRAGS = 138, PPORT_RX_DROPS_INVALID_QUEUE = 140, PPORT_RESERVED_WORD141 = 141, PPORT_RX_DROPS_MTU = 142, PPORT_RX_PKTS_64_BYTES = 144, PPORT_RX_PKTS_65_TO_127_BYTES = 146, PPORT_RX_PKTS_128_TO_255_BYTES = 148, PPORT_RX_PKTS_256_TO_511_BYTES = 150, PPORT_RX_PKTS_512_TO_1023_BYTES = 152, PPORT_RX_PKTS_1024_TO_1518_BYTES = 154, PPORT_RX_PKTS_1519_TO_2047_BYTES = 156, PPORT_RX_PKTS_2048_TO_4095_BYTES = 158, PPORT_RX_PKTS_4096_TO_8191_BYTES = 160, PPORT_RX_PKTS_8192_TO_9216_BYTES = 162, PPORT_N_WORDS = 164 }; /** * @brief Statistics for a given Virtual Port (vPort) * The following describes the vPort statistics satisfying * requirements of Linux/VMWare netdev statistics and * Microsoft Windows Statistics along with other Operating Systems. 
*/ enum OCE_VPORT_STATS { VPORT_TX_PKTS = 0, VPORT_TX_UNICAST_PKTS = 2, VPORT_TX_MULTICAST_PKTS = 4, VPORT_TX_BROADCAST_PKTS = 6, VPORT_TX_BYTES = 8, VPORT_TX_UNICAST_BYTES = 10, VPORT_TX_MULTICAST_BYTES = 12, VPORT_TX_BROADCAST_BYTES = 14, VPORT_TX_DISCARDS = 16, VPORT_TX_ERRORS = 18, VPORT_TX_PKTS_64_BYTES = 20, VPORT_TX_PKTS_65_TO_127_BYTES = 22, VPORT_TX_PKTS_128_TO_255_BYTES = 24, VPORT_TX_PKTS_256_TO_511_BYTES = 26, VPORT_TX_PKTS_512_TO_1023_BYTEs = 28, VPORT_TX_PKTS_1024_TO_1518_BYTEs = 30, VPORT_TX_PKTS_1519_TO_9699_BYTEs = 32, VPORT_TX_PKTS_OVER_9699_BYTES = 34, VPORT_RX_PKTS = 36, VPORT_RX_UNICAST_PKTS = 38, VPORT_RX_MULTICAST_PKTS = 40, VPORT_RX_BROADCAST_PKTS = 42, VPORT_RX_BYTES = 44, VPORT_RX_UNICAST_BYTES = 46, VPORT_RX_MULTICAST_BYTES = 48, VPORT_RX_BROADCAST_BYTES = 50, VPORT_RX_DISCARDS = 52, VPORT_RX_ERRORS = 54, VPORT_RX_PKTS_64_BYTES = 56, VPORT_RX_PKTS_65_TO_127_BYTES = 58, VPORT_RX_PKTS_128_TO_255_BYTES = 60, VPORT_RX_PKTS_256_TO_511_BYTES = 62, VPORT_RX_PKTS_512_TO_1023_BYTEs = 64, VPORT_RX_PKTS_1024_TO_1518_BYTEs = 66, VPORT_RX_PKTS_1519_TO_9699_BYTEs = 68, VPORT_RX_PKTS_OVER_9699_BYTES = 70, VPORT_N_WORDS = 72 }; /** * @brief Statistics for a given queue (NIC WQ, RQ, or HDS RQ) * This set satisfies requirements of VMQare NetQueue and Microsoft VMQ */ enum OCE_QUEUE_TX_STATS { QUEUE_TX_PKTS = 0, QUEUE_TX_BYTES = 2, QUEUE_TX_ERRORS = 4, QUEUE_TX_DROPS = 6, QUEUE_TX_N_WORDS = 8 }; enum OCE_QUEUE_RX_STATS { QUEUE_RX_PKTS = 0, QUEUE_RX_BYTES = 2, QUEUE_RX_ERRORS = 4, QUEUE_RX_DROPS = 6, QUEUE_RX_BUFFER_ERRORS = 8, QUEUE_RX_N_WORDS = 10 +}; + +/* HW LRO structures */ +struct mbx_nic_query_lro_capabilities { + struct mbx_hdr hdr; + union { + struct { + uint32_t rsvd[6]; + } req; + struct { +#ifdef _BIG_ENDIAN + uint32_t lro_flags; + uint16_t lro_rq_cnt; + uint16_t plro_max_offload; + uint32_t rsvd[4]; +#else + uint32_t lro_flags; + uint16_t plro_max_offload; + uint16_t lro_rq_cnt; + uint32_t rsvd[4]; +#endif + } rsp; + } params; +}; + +struct mbx_nic_set_iface_lro_config { + struct mbx_hdr hdr; + union { + struct { +#ifdef _BIG_ENDIAN + uint32_t lro_flags; + uint32_t iface_id; + uint32_t max_clsc_byte_cnt; + uint32_t max_clsc_seg_cnt; + uint32_t max_clsc_usec_delay; + uint32_t min_clsc_frame_byte_cnt; + uint32_t rsvd[2]; +#else + uint32_t lro_flags; + uint32_t iface_id; + uint32_t max_clsc_byte_cnt; + uint32_t max_clsc_seg_cnt; + uint32_t max_clsc_usec_delay; + uint32_t min_clsc_frame_byte_cnt; + uint32_t rsvd[2]; +#endif + } req; + struct { +#ifdef _BIG_ENDIAN + uint32_t lro_flags; + uint32_t rsvd[7]; +#else + uint32_t lro_flags; + uint32_t rsvd[7]; +#endif + } rsp; + } params; +}; + + +struct mbx_create_nic_rq_v2 { + struct mbx_hdr hdr; + union { + struct { +#ifdef _BIG_ENDIAN + uint8_t num_pages; + uint8_t frag_size; + uint16_t cq_id; + + uint32_t if_id; + + uint16_t page_size; + uint16_t max_frame_size; + + uint16_t rsvd; + uint16_t pd_id; + + uint16_t rsvd1; + uint16_t rq_flags; + + uint16_t hds_fixed_offset; + uint8_t hds_start; + uint8_t hds_frag; + + uint16_t hds_backfill_size; + uint16_t hds_frag_size; + + uint32_t rbq_id; + + uint32_t rsvd2[8]; + + struct phys_addr pages[2]; +#else + uint16_t cq_id; + uint8_t frag_size; + uint8_t num_pages; + + uint32_t if_id; + + uint16_t max_frame_size; + uint16_t page_size; + + uint16_t pd_id; + uint16_t rsvd; + + uint16_t rq_flags; + uint16_t rsvd1; + + uint8_t hds_frag; + uint8_t hds_start; + uint16_t hds_fixed_offset; + + uint16_t hds_frag_size; + uint16_t hds_backfill_size; + + uint32_t rbq_id; + + uint32_t rsvd2[8]; + + 
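The new set-iface-LRO request above carries the coalescing limits the hardware enforces per interface. A sketch of populating it follows; the specific limit values, the flag-bit encoding, and the final mailbox-submit step are assumptions for illustration, not taken from this commit:

	/* Sketch: fill an LRO-config request with example limits. */
	static void
	lro_cfg_fill_sketch(struct mbx_nic_set_iface_lro_config *cmd,
	    uint32_t if_id, int enable)
	{
		bzero(cmd, sizeof(*cmd));
		cmd->params.req.iface_id = if_id;
		cmd->params.req.lro_flags = enable ? 0x1 : 0; /* bit assumed */
		cmd->params.req.max_clsc_byte_cnt = 64 * 1024; /* byte cap */
		cmd->params.req.max_clsc_seg_cnt = 16;	/* segment cap */
		cmd->params.req.max_clsc_usec_delay = 8; /* flush timer, us */
		cmd->params.req.min_clsc_frame_byte_cnt = 0;
		/* ...then wrap in a mailbox header and post via the
		 * bootstrap mailbox. */
	}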
struct phys_addr pages[2]; +#endif + } req; + struct { +#ifdef _BIG_ENDIAN + uint8_t rsvd0; + uint8_t rss_cpuid; + uint16_t rq_id; + + uint8_t db_format; + uint8_t db_reg_set; + uint16_t rsvd1; + + uint32_t db_offset; + + uint32_t rsvd2; + + uint16_t rsvd3; + uint16_t rq_flags; + +#else + uint16_t rq_id; + uint8_t rss_cpuid; + uint8_t rsvd0; + + uint16_t rsvd1; + uint8_t db_reg_set; + uint8_t db_format; + + uint32_t db_offset; + + uint32_t rsvd2; + + uint16_t rq_flags; + uint16_t rsvd3; +#endif + } rsp; + + } params; +}; + +struct mbx_delete_nic_rq_v1 { + struct mbx_hdr hdr; + union { + struct { +#ifdef _BIG_ENDIAN + uint16_t bypass_flush; + uint16_t rq_id; + uint16_t rsvd; + uint16_t rq_flags; +#else + uint16_t rq_id; + uint16_t bypass_flush; + uint16_t rq_flags; + uint16_t rsvd; +#endif + } req; + struct { + uint32_t rsvd[2]; + } rsp; + } params; +}; + +struct nic_hwlro_singleton_cqe { +#ifdef _BIG_ENDIAN + /* dw 0 */ + uint32_t ip_opt:1; + uint32_t vtp:1; + uint32_t pkt_size:14; + uint32_t vlan_tag:16; + + /* dw 1 */ + uint32_t num_frags:3; + uint32_t rsvd1:3; + uint32_t frag_index:10; + uint32_t rsvd:8; + uint32_t ipv6_frame:1; + uint32_t l4_cksum_pass:1; + uint32_t ip_cksum_pass:1; + uint32_t udpframe:1; + uint32_t tcpframe:1; + uint32_t ipframe:1; + uint32_t rss_hp:1; + uint32_t error:1; + + /* dw 2 */ + uint32_t valid:1; + uint32_t cqe_type:2; + uint32_t debug:7; + uint32_t rsvd4:6; + uint32_t data_offset:8; + uint32_t rsvd3:3; + uint32_t rss_bank:1; + uint32_t qnq:1; + uint32_t rsvd2:3; + + /* dw 3 */ + uint32_t rss_hash_value; +#else + /* dw 0 */ + uint32_t vlan_tag:16; + uint32_t pkt_size:14; + uint32_t vtp:1; + uint32_t ip_opt:1; + + /* dw 1 */ + uint32_t error:1; + uint32_t rss_hp:1; + uint32_t ipframe:1; + uint32_t tcpframe:1; + uint32_t udpframe:1; + uint32_t ip_cksum_pass:1; + uint32_t l4_cksum_pass:1; + uint32_t ipv6_frame:1; + uint32_t rsvd:8; + uint32_t frag_index:10; + uint32_t rsvd1:3; + uint32_t num_frags:3; + + /* dw 2 */ + uint32_t rsvd2:3; + uint32_t qnq:1; + uint32_t rss_bank:1; + uint32_t rsvd3:3; + uint32_t data_offset:8; + uint32_t rsvd4:6; + uint32_t debug:7; + uint32_t cqe_type:2; + uint32_t valid:1; + + /* dw 3 */ + uint32_t rss_hash_value; +#endif +}; + +struct nic_hwlro_cqe_part1 { +#ifdef _BIG_ENDIAN + /* dw 0 */ + uint32_t tcp_timestamp_val; + + /* dw 1 */ + uint32_t tcp_timestamp_ecr; + + /* dw 2 */ + uint32_t valid:1; + uint32_t cqe_type:2; + uint32_t rsvd3:7; + uint32_t rss_policy:4; + uint32_t rsvd2:2; + uint32_t data_offset:8; + uint32_t rsvd1:1; + uint32_t lro_desc:1; + uint32_t lro_timer_pop:1; + uint32_t rss_bank:1; + uint32_t qnq:1; + uint32_t rsvd:2; + uint32_t rss_flush:1; + + /* dw 3 */ + uint32_t rss_hash_value; +#else + /* dw 0 */ + uint32_t tcp_timestamp_val; + + /* dw 1 */ + uint32_t tcp_timestamp_ecr; + + /* dw 2 */ + uint32_t rss_flush:1; + uint32_t rsvd:2; + uint32_t qnq:1; + uint32_t rss_bank:1; + uint32_t lro_timer_pop:1; + uint32_t lro_desc:1; + uint32_t rsvd1:1; + uint32_t data_offset:8; + uint32_t rsvd2:2; + uint32_t rss_policy:4; + uint32_t rsvd3:7; + uint32_t cqe_type:2; + uint32_t valid:1; + + /* dw 3 */ + uint32_t rss_hash_value; +#endif +}; + +struct nic_hwlro_cqe_part2 { +#ifdef _BIG_ENDIAN + /* dw 0 */ + uint32_t ip_opt:1; + uint32_t vtp:1; + uint32_t pkt_size:14; + uint32_t vlan_tag:16; + + /* dw 1 */ + uint32_t tcp_window:16; + uint32_t coalesced_size:16; + + /* dw 2 */ + uint32_t valid:1; + uint32_t cqe_type:2; + uint32_t rsvd:2; + uint32_t push:1; + uint32_t ts_opt:1; + uint32_t threshold:1; + uint32_t seg_cnt:8; + 
uint32_t frame_lifespan:8; + uint32_t ipv6_frame:1; + uint32_t l4_cksum_pass:1; + uint32_t ip_cksum_pass:1; + uint32_t udpframe:1; + uint32_t tcpframe:1; + uint32_t ipframe:1; + uint32_t rss_hp:1; + uint32_t error:1; + + /* dw 3 */ + uint32_t tcp_ack_num; +#else + /* dw 0 */ + uint32_t vlan_tag:16; + uint32_t pkt_size:14; + uint32_t vtp:1; + uint32_t ip_opt:1; + + /* dw 1 */ + uint32_t coalesced_size:16; + uint32_t tcp_window:16; + + /* dw 2 */ + uint32_t error:1; + uint32_t rss_hp:1; + uint32_t ipframe:1; + uint32_t tcpframe:1; + uint32_t udpframe:1; + uint32_t ip_cksum_pass:1; + uint32_t l4_cksum_pass:1; + uint32_t ipv6_frame:1; + uint32_t frame_lifespan:8; + uint32_t seg_cnt:8; + uint32_t threshold:1; + uint32_t ts_opt:1; + uint32_t push:1; + uint32_t rsvd:2; + uint32_t cqe_type:2; + uint32_t valid:1; + + /* dw 3 */ + uint32_t tcp_ack_num; +#endif }; Index: stable/11/sys/dev/oce/oce_if.c =================================================================== --- stable/11/sys/dev/oce/oce_if.c (revision 338937) +++ stable/11/sys/dev/oce/oce_if.c (revision 338938) @@ -1,2355 +1,2993 @@ /*- * Copyright (C) 2013 Emulex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include "opt_inet6.h" #include "opt_inet.h" #include "oce_if.h" +#include "oce_user.h" +#define is_tso_pkt(m) (m->m_pkthdr.csum_flags & CSUM_TSO) + /* UE Status Low CSR */ static char *ue_status_low_desc[] = { - "CEV", - "CTX", - "DBUF", - "ERX", - "Host", - "MPU", - "NDMA", - "PTC ", - "RDMA ", - "RXF ", - "RXIPS ", - "RXULP0 ", - "RXULP1 ", - "RXULP2 ", - "TIM ", - "TPOST ", - "TPRE ", - "TXIPS ", - "TXULP0 ", - "TXULP1 ", - "UC ", - "WDMA ", - "TXULP2 ", - "HOST1 ", - "P0_OB_LINK ", - "P1_OB_LINK ", - "HOST_GPIO ", - "MBOX ", - "AXGMAC0", - "AXGMAC1", - "JTAG", - "MPU_INTPEND" + "CEV", + "CTX", + "DBUF", + "ERX", + "Host", + "MPU", + "NDMA", + "PTC ", + "RDMA ", + "RXF ", + "RXIPS ", + "RXULP0 ", + "RXULP1 ", + "RXULP2 ", + "TIM ", + "TPOST ", + "TPRE ", + "TXIPS ", + "TXULP0 ", + "TXULP1 ", + "UC ", + "WDMA ", + "TXULP2 ", + "HOST1 ", + "P0_OB_LINK ", + "P1_OB_LINK ", + "HOST_GPIO ", + "MBOX ", + "AXGMAC0", + "AXGMAC1", + "JTAG", + "MPU_INTPEND" }; /* UE Status High CSR */ static char *ue_status_hi_desc[] = { - "LPCMEMHOST", - "MGMT_MAC", - "PCS0ONLINE", - "MPU_IRAM", - "PCS1ONLINE", - "PCTL0", - "PCTL1", - "PMEM", - "RR", - "TXPB", - "RXPP", - "XAUI", - "TXP", - "ARM", - "IPC", - "HOST2", - "HOST3", - "HOST4", - "HOST5", - "HOST6", - "HOST7", - "HOST8", - "HOST9", - "NETC", - "Unknown", - "Unknown", - "Unknown", - "Unknown", - "Unknown", - "Unknown", - "Unknown", - "Unknown" + "LPCMEMHOST", + "MGMT_MAC", + "PCS0ONLINE", + "MPU_IRAM", + "PCS1ONLINE", + "PCTL0", + "PCTL1", + "PMEM", + "RR", + "TXPB", + "RXPP", + "XAUI", + "TXP", + "ARM", + "IPC", + "HOST2", + "HOST3", + "HOST4", + "HOST5", + "HOST6", + "HOST7", + "HOST8", + "HOST9", + "NETC", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown" }; +struct oce_common_cqe_info{ + uint8_t vtp:1; + uint8_t l4_cksum_pass:1; + uint8_t ip_cksum_pass:1; + uint8_t ipv6_frame:1; + uint8_t qnq:1; + uint8_t rsvd:3; + uint8_t num_frags; + uint16_t pkt_size; + uint16_t vtag; +}; + /* Driver entry points prototypes */ static int oce_probe(device_t dev); static int oce_attach(device_t dev); static int oce_detach(device_t dev); static int oce_shutdown(device_t dev); static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void oce_init(void *xsc); static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m); static void oce_multiq_flush(struct ifnet *ifp); /* Driver interrupt routines protypes */ static void oce_intr(void *arg, int pending); static int oce_setup_intr(POCE_SOFTC sc); static int oce_fast_isr(void *arg); static int oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending)); /* Media callbacks prototypes */ static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req); static int oce_media_change(struct ifnet *ifp); /* Transmit routines prototypes */ static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index); static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq); -static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, - uint32_t status); +static void oce_process_tx_completion(struct oce_wq *wq); static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq); /* Receive routines prototypes */ -static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe); static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe); static int 
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe); -static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx, - struct oce_nic_rx_cqe *cqe); +static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe); +static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq); +static uint16_t oce_rq_handler_lro(void *arg); +static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2); +static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2); +static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m); /* Helper function prototypes in this file */ static int oce_attach_ifp(POCE_SOFTC sc); static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag); static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag); static int oce_vid_config(POCE_SOFTC sc); static void oce_mac_addr_set(POCE_SOFTC sc); static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data); static void oce_local_timer(void *arg); static void oce_if_deactivate(POCE_SOFTC sc); static void oce_if_activate(POCE_SOFTC sc); static void setup_max_queues_want(POCE_SOFTC sc); static void update_queues_got(POCE_SOFTC sc); static void process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe); static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m); static void oce_get_config(POCE_SOFTC sc); static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete); +static void oce_read_env_variables(POCE_SOFTC sc); + /* IP specific */ #if defined(INET6) || defined(INET) static int oce_init_lro(POCE_SOFTC sc); -static void oce_rx_flush_lro(struct oce_rq *rq); static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp); #endif static device_method_t oce_dispatch[] = { DEVMETHOD(device_probe, oce_probe), DEVMETHOD(device_attach, oce_attach), DEVMETHOD(device_detach, oce_detach), DEVMETHOD(device_shutdown, oce_shutdown), DEVMETHOD_END }; static driver_t oce_driver = { "oce", oce_dispatch, sizeof(OCE_SOFTC) }; static devclass_t oce_devclass; DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0); MODULE_DEPEND(oce, pci, 1, 1, 1); MODULE_DEPEND(oce, ether, 1, 1, 1); MODULE_VERSION(oce, 1); /* global vars */ const char component_revision[32] = {"///" COMPONENT_REVISION "///"}; /* Module capabilites and parameters */ uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED; uint32_t oce_enable_rss = OCE_MODCAP_RSS; +uint32_t oce_rq_buf_size = 2048; - TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled); TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss); /* Supported devices table */ static uint32_t supportedDevices[] = { (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2, (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH }; +POCE_SOFTC softc_head = NULL; +POCE_SOFTC softc_tail = NULL; +struct oce_rdma_if *oce_rdma_if = NULL; - /***************************************************************************** * Driver entry points functions * *****************************************************************************/ static int oce_probe(device_t dev) { uint16_t vendor = 0; uint16_t device = 0; int i = 0; char str[256] = {0}; POCE_SOFTC sc; sc = device_get_softc(dev); bzero(sc, sizeof(OCE_SOFTC)); 
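The two UE description tables earlier in this file are indexed by bit position in the corresponding unrecoverable-error CSRs. A sketch of the conventional decode loop, assuming the two status words have already been read and masked (the helper name and its arguments are illustrative):

	/* Sketch: report asserted UE bits using the tables above. */
	static void
	oce_print_ue_bits_sketch(device_t dev, uint32_t ue_lo, uint32_t ue_hi)
	{
		int i;

		for (i = 0; i < 32; i++) {
			if (ue_lo & (1U << i))
				device_printf(dev, "UE low: %s\n",
				    ue_status_low_desc[i]);
			if (ue_hi & (1U << i))
				device_printf(dev, "UE hi: %s\n",
				    ue_status_hi_desc[i]);
		}
	}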
sc->dev = dev; vendor = pci_get_vendor(dev); device = pci_get_device(dev); for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) { if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) { if (device == (supportedDevices[i] & 0xffff)) { sprintf(str, "%s:%s", "Emulex CNA NIC function", component_revision); device_set_desc_copy(dev, str); switch (device) { case PCI_PRODUCT_BE2: sc->flags |= OCE_FLAGS_BE2; break; case PCI_PRODUCT_BE3: sc->flags |= OCE_FLAGS_BE3; break; case PCI_PRODUCT_XE201: case PCI_PRODUCT_XE201_VF: sc->flags |= OCE_FLAGS_XE201; break; case PCI_PRODUCT_SH: sc->flags |= OCE_FLAGS_SH; break; default: return ENXIO; } return BUS_PROBE_DEFAULT; } } } return ENXIO; } static int oce_attach(device_t dev) { POCE_SOFTC sc; int rc = 0; sc = device_get_softc(dev); rc = oce_hw_pci_alloc(sc); if (rc) return rc; sc->tx_ring_size = OCE_TX_RING_SIZE; sc->rx_ring_size = OCE_RX_RING_SIZE; - sc->rq_frag_size = OCE_RQ_BUF_SIZE; + /* receive fragment size should be multiple of 2K */ + sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048); sc->flow_control = OCE_DEFAULT_FLOW_CONTROL; sc->promisc = OCE_DEFAULT_PROMISCUOUS; LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock"); LOCK_CREATE(&sc->dev_lock, "Device_lock"); /* initialise the hardware */ rc = oce_hw_init(sc); if (rc) goto pci_res_free; + oce_read_env_variables(sc); + oce_get_config(sc); setup_max_queues_want(sc); rc = oce_setup_intr(sc); if (rc) goto mbox_free; rc = oce_queue_init_all(sc); if (rc) goto intr_free; rc = oce_attach_ifp(sc); if (rc) goto queues_free; #if defined(INET6) || defined(INET) rc = oce_init_lro(sc); if (rc) goto ifp_free; #endif rc = oce_hw_start(sc); if (rc) goto lro_free; sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST); rc = oce_stats_init(sc); if (rc) goto vlan_free; oce_add_sysctls(sc); - callout_init(&sc->timer, 1); + callout_init(&sc->timer, CALLOUT_MPSAFE); rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc); if (rc) goto stats_free; + sc->next =NULL; + if (softc_tail != NULL) { + softc_tail->next = sc; + } else { + softc_head = sc; + } + softc_tail = sc; + return 0; stats_free: callout_drain(&sc->timer); oce_stats_free(sc); vlan_free: if (sc->vlan_attach) EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); if (sc->vlan_detach) EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); oce_hw_intr_disable(sc); lro_free: #if defined(INET6) || defined(INET) oce_free_lro(sc); ifp_free: #endif ether_ifdetach(sc->ifp); if_free(sc->ifp); queues_free: oce_queue_release_all(sc); intr_free: oce_intr_free(sc); mbox_free: oce_dma_free(sc, &sc->bsmbx); pci_res_free: oce_hw_pci_free(sc); LOCK_DESTROY(&sc->dev_lock); LOCK_DESTROY(&sc->bmbx_lock); return rc; } static int oce_detach(device_t dev) { POCE_SOFTC sc = device_get_softc(dev); + POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL; + poce_sc_tmp = softc_head; + ppoce_sc_tmp1 = &softc_head; + while (poce_sc_tmp != NULL) { + if (poce_sc_tmp == sc) { + *ppoce_sc_tmp1 = sc->next; + if (sc->next == NULL) { + softc_tail = poce_sc_tmp2; + } + break; + } + poce_sc_tmp2 = poce_sc_tmp; + ppoce_sc_tmp1 = &poce_sc_tmp->next; + poce_sc_tmp = poce_sc_tmp->next; + } + LOCK(&sc->dev_lock); oce_if_deactivate(sc); UNLOCK(&sc->dev_lock); callout_drain(&sc->timer); if (sc->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); if (sc->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, 
sc->vlan_detach); ether_ifdetach(sc->ifp); if_free(sc->ifp); oce_hw_shutdown(sc); bus_generic_detach(dev); return 0; } static int oce_shutdown(device_t dev) { int rc; rc = oce_detach(dev); return rc; } static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; POCE_SOFTC sc = ifp->if_softc; int rc = 0; uint32_t u; switch (command) { case SIOCGIFMEDIA: rc = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; case SIOCSIFMTU: if (ifr->ifr_mtu > OCE_MAX_MTU) rc = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; oce_init(sc); } device_printf(sc->dev, "Interface Up\n"); } else { LOCK(&sc->dev_lock); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); oce_if_deactivate(sc); UNLOCK(&sc->dev_lock); device_printf(sc->dev, "Interface Down\n"); } if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) { if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1)))) sc->promisc = TRUE; } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) { if (!oce_rxf_set_promiscuous(sc, 0)) sc->promisc = FALSE; } break; case SIOCADDMULTI: case SIOCDELMULTI: rc = oce_hw_update_multicast(sc); if (rc) device_printf(sc->dev, "Update multicast address failed\n"); break; case SIOCSIFCAP: u = ifr->ifr_reqcap ^ ifp->if_capenable; if (u & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO; ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "TSO disabled due to -txcsum.\n"); } } if (u & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (u & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (IFCAP_TSO & ifp->if_capenable) { if (IFCAP_TXCSUM & ifp->if_capenable) ifp->if_hwassist |= CSUM_TSO; else { ifp->if_capenable &= ~IFCAP_TSO; ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "Enable txcsum first.\n"); rc = EAGAIN; } } else ifp->if_hwassist &= ~CSUM_TSO; } if (u & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (u & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; oce_vid_config(sc); } #if defined(INET6) || defined(INET) - if (u & IFCAP_LRO) + if (u & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; + if(sc->enable_hwlro) { + if(ifp->if_capenable & IFCAP_LRO) { + rc = oce_mbox_nic_set_iface_lro_config(sc, 1); + }else { + rc = oce_mbox_nic_set_iface_lro_config(sc, 0); + } + } + } #endif break; case SIOCGPRIVATE_0: rc = oce_handle_passthrough(ifp, data); break; default: rc = ether_ioctl(ifp, command, data); break; } return rc; } static void oce_init(void *arg) { POCE_SOFTC sc = arg; LOCK(&sc->dev_lock); if (sc->ifp->if_flags & IFF_UP) { oce_if_deactivate(sc); oce_if_activate(sc); } UNLOCK(&sc->dev_lock); } static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m) { POCE_SOFTC sc = ifp->if_softc; struct oce_wq *wq = NULL; int queue_index = 0; int status = 0; + if (!sc->link_status) + return ENXIO; + if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) queue_index = m->m_pkthdr.flowid % sc->nwqs; wq = sc->wq[queue_index]; LOCK(&wq->tx_lock); status = oce_multiq_transmit(ifp, m, wq); UNLOCK(&wq->tx_lock); return status; } static void oce_multiq_flush(struct ifnet *ifp) { POCE_SOFTC sc = ifp->if_softc; struct mbuf *m; int i = 0; for (i = 0; i < sc->nwqs; i++) { while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL) m_freem(m); } if_qflush(ifp); } 
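/*
 * A minimal userspace sketch (not part of the driver) of the queue-selection
 * rule oce_multiq_start() applies above: hashed traffic is pinned to a work
 * queue by flowid % nwqs, and unhashed traffic falls back to queue 0.
 * pick_tx_queue() and the queue count below are illustrative names only.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static int
pick_tx_queue(uint32_t flowid, bool has_hash, int nwqs)
{
	/* mirrors: queue_index = m->m_pkthdr.flowid % sc->nwqs */
	return (has_hash ? (int)(flowid % (uint32_t)nwqs) : 0);
}

int
main(void)
{
	const int nwqs = 4;	/* assumed number of TX work queues */
	uint32_t flows[] = { 0x1234, 0x1235, 0x1234 };

	for (int i = 0; i < 3; i++)
		printf("flow %#x -> wq %d\n", flows[i],
		    pick_tx_queue(flows[i], true, nwqs));
	/* the same hash always maps to the same queue, which is what
	 * preserves per-flow packet ordering on the wire */
	return (0);
}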
/***************************************************************************** * Driver interrupt routines functions * *****************************************************************************/ static void oce_intr(void *arg, int pending) { POCE_INTR_INFO ii = (POCE_INTR_INFO) arg; POCE_SOFTC sc = ii->sc; struct oce_eq *eq = ii->eq; struct oce_eqe *eqe; struct oce_cq *cq = NULL; int i, num_eqes = 0; bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map, BUS_DMASYNC_POSTWRITE); do { eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe); if (eqe->evnt == 0) break; eqe->evnt = 0; bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map, BUS_DMASYNC_POSTWRITE); RING_GET(eq->ring, 1); num_eqes++; } while (TRUE); if (!num_eqes) goto eq_arm; /* Spurious */ /* Clear EQ entries, but dont arm */ oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE); /* Process TX, RX and MCC. But dont arm CQ*/ for (i = 0; i < eq->cq_valid; i++) { cq = eq->cq[i]; (*cq->cq_handler)(cq->cb_arg); } /* Arm all cqs connected to this EQ */ for (i = 0; i < eq->cq_valid; i++) { cq = eq->cq[i]; oce_arm_cq(sc, cq->cq_id, 0, TRUE); } eq_arm: oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE); return; } static int oce_setup_intr(POCE_SOFTC sc) { int rc = 0, use_intx = 0; int vector = 0, req_vectors = 0; + int tot_req_vectors, tot_vectors; if (is_rss_enabled(sc)) req_vectors = MAX((sc->nrqs - 1), sc->nwqs); else req_vectors = 1; - if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) { + tot_req_vectors = req_vectors; + if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) { + if (req_vectors > 1) { + tot_req_vectors += OCE_RDMA_VECTORS; + sc->roce_intr_count = OCE_RDMA_VECTORS; + } + } + + if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) { sc->intr_count = req_vectors; - rc = pci_alloc_msix(sc->dev, &sc->intr_count); + tot_vectors = tot_req_vectors; + rc = pci_alloc_msix(sc->dev, &tot_vectors); if (rc != 0) { use_intx = 1; pci_release_msi(sc->dev); - } else - sc->flags |= OCE_FLAGS_USING_MSIX; + } else { + if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) { + if (tot_vectors < tot_req_vectors) { + if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) { + sc->roce_intr_count = (tot_vectors / 2); + } + sc->intr_count = tot_vectors - sc->roce_intr_count; + } + } else { + sc->intr_count = tot_vectors; + } + sc->flags |= OCE_FLAGS_USING_MSIX; + } } else use_intx = 1; if (use_intx) sc->intr_count = 1; /* Scale number of queues based on intr we got */ update_queues_got(sc); if (use_intx) { device_printf(sc->dev, "Using legacy interrupt\n"); rc = oce_alloc_intr(sc, vector, oce_intr); if (rc) goto error; } else { for (; vector < sc->intr_count; vector++) { rc = oce_alloc_intr(sc, vector, oce_intr); if (rc) goto error; } } return 0; error: oce_intr_free(sc); return rc; } static int oce_fast_isr(void *arg) { POCE_INTR_INFO ii = (POCE_INTR_INFO) arg; POCE_SOFTC sc = ii->sc; if (ii->eq == NULL) return FILTER_STRAY; oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE); taskqueue_enqueue(ii->tq, &ii->task); ii->eq->intr++; return FILTER_HANDLED; } static int oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending)) { POCE_INTR_INFO ii = &sc->intrs[vector]; int rc = 0, rr; if (vector >= OCE_MAX_EQ) return (EINVAL); /* Set the resource id for the interrupt. * MSIx is vector + 1 for the resource id, * INTx is 0 for the resource id. 
*/ if (sc->flags & OCE_FLAGS_USING_MSIX) rr = vector + 1; else rr = 0; ii->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rr, RF_ACTIVE|RF_SHAREABLE); ii->irq_rr = rr; if (ii->intr_res == NULL) { device_printf(sc->dev, "Could not allocate interrupt\n"); rc = ENXIO; return rc; } TASK_INIT(&ii->task, 0, isr, ii); ii->vector = vector; sprintf(ii->task_name, "oce_task[%d]", ii->vector); ii->tq = taskqueue_create_fast(ii->task_name, M_NOWAIT, taskqueue_thread_enqueue, &ii->tq); taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->dev)); ii->sc = sc; rc = bus_setup_intr(sc->dev, ii->intr_res, INTR_TYPE_NET, oce_fast_isr, NULL, ii, &ii->tag); return rc; } void oce_intr_free(POCE_SOFTC sc) { int i = 0; for (i = 0; i < sc->intr_count; i++) { if (sc->intrs[i].tag != NULL) bus_teardown_intr(sc->dev, sc->intrs[i].intr_res, sc->intrs[i].tag); if (sc->intrs[i].tq != NULL) taskqueue_free(sc->intrs[i].tq); if (sc->intrs[i].intr_res != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intrs[i].irq_rr, sc->intrs[i].intr_res); sc->intrs[i].tag = NULL; sc->intrs[i].intr_res = NULL; } if (sc->flags & OCE_FLAGS_USING_MSIX) pci_release_msi(sc->dev); } /****************************************************************************** * Media callbacks functions * ******************************************************************************/ static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req) { POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc; req->ifm_status = IFM_AVALID; req->ifm_active = IFM_ETHER; if (sc->link_status == 1) req->ifm_status |= IFM_ACTIVE; else return; switch (sc->link_speed) { case 1: /* 10 Mbps */ req->ifm_active |= IFM_10_T | IFM_FDX; sc->speed = 10; break; case 2: /* 100 Mbps */ req->ifm_active |= IFM_100_TX | IFM_FDX; sc->speed = 100; break; case 3: /* 1 Gbps */ req->ifm_active |= IFM_1000_T | IFM_FDX; sc->speed = 1000; break; case 4: /* 10 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 10000; break; case 5: /* 20 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 20000; break; case 6: /* 25 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 25000; break; case 7: /* 40 Gbps */ req->ifm_active |= IFM_40G_SR4 | IFM_FDX; sc->speed = 40000; break; default: sc->speed = 0; break; } return; } int oce_media_change(struct ifnet *ifp) { return 0; } +static void oce_is_pkt_dest_bmc(POCE_SOFTC sc, + struct mbuf *m, boolean_t *os2bmc, + struct mbuf **m_new) +{ + struct ether_header *eh = NULL; + eh = mtod(m, struct ether_header *); + if (!is_os2bmc_enabled(sc) || *os2bmc) { + *os2bmc = FALSE; + goto done; + } + if (!ETHER_IS_MULTICAST(eh->ether_dhost)) + goto done; + + if (is_mc_allowed_on_bmc(sc, eh) || + is_bc_allowed_on_bmc(sc, eh) || + is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) { + *os2bmc = TRUE; + goto done; + } + + if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) { + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + uint8_t nexthdr = ip6->ip6_nxt; + if (nexthdr == IPPROTO_ICMPV6) { + struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1); + switch (icmp6->icmp6_type) { + case ND_ROUTER_ADVERT: + *os2bmc = is_ipv6_ra_filt_enabled(sc); + goto done; + case ND_NEIGHBOR_ADVERT: + *os2bmc = is_ipv6_na_filt_enabled(sc); + goto done; + default: + break; + } + } + } + + if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) { + struct ip *ip = mtod(m, struct ip *); + int iphlen = ip->ip_hl << 2; + struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen); + switch (uh->uh_dport) { + case DHCP_CLIENT_PORT: + *os2bmc = 
is_dhcp_client_filt_enabled(sc); + goto done; + case DHCP_SERVER_PORT: + *os2bmc = is_dhcp_srvr_filt_enabled(sc); + goto done; + case NET_BIOS_PORT1: + case NET_BIOS_PORT2: + *os2bmc = is_nbios_filt_enabled(sc); + goto done; + case DHCPV6_RAS_PORT: + *os2bmc = is_ipv6_ras_filt_enabled(sc); + goto done; + default: + break; + } + } +done: + if (*os2bmc) { + *m_new = m_dup(m, M_NOWAIT); + if (!*m_new) { + *os2bmc = FALSE; + return; + } + *m_new = oce_insert_vlan_tag(sc, *m_new, NULL); + } +} + + + /***************************************************************************** * Transmit routines functions * *****************************************************************************/ static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index) { int rc = 0, i, retry_cnt = 0; bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS]; - struct mbuf *m, *m_temp; + struct mbuf *m, *m_temp, *m_new = NULL; struct oce_wq *wq = sc->wq[wq_index]; struct oce_packet_desc *pd; struct oce_nic_hdr_wqe *nichdr; struct oce_nic_frag_wqe *nicfrag; + struct ether_header *eh = NULL; int num_wqes; uint32_t reg_value; boolean_t complete = TRUE; + boolean_t os2bmc = FALSE; m = *mpp; if (!m) return EINVAL; if (!(m->m_flags & M_PKTHDR)) { rc = ENXIO; goto free_ret; } + /* Don't allow non-TSO packets longer than MTU */ + if (!is_tso_pkt(m)) { + eh = mtod(m, struct ether_header *); + if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE)) + goto free_ret; + } + if(oce_tx_asic_stall_verify(sc, m)) { m = oce_insert_vlan_tag(sc, m, &complete); if(!m) { device_printf(sc->dev, "Insertion unsuccessful\n"); return 0; } } + /* Lancer, SH ASIC has a bug wherein Packets that are 32 bytes or less + * may cause a transmit stall on that port. So the work-around is to + * pad short packets (<= 32 bytes) to a 36-byte length. + */ + if(IS_SH(sc) || IS_XE201(sc) ) { + if(m->m_pkthdr.len <= 32) { + char buf[36]; + bzero((void *)buf, 36); + m_append(m, (36 - m->m_pkthdr.len), buf); + } + } + +tx_start: if (m->m_pkthdr.csum_flags & CSUM_TSO) { /* consolidate packet buffers for TSO/LSO segment offload */ #if defined(INET6) || defined(INET) m = oce_tso_setup(sc, mpp); #else m = NULL; #endif if (m == NULL) { rc = ENXIO; goto free_ret; } } + pd = &wq->pckts[wq->pkt_desc_head]; + retry: rc = bus_dmamap_load_mbuf_sg(wq->tag, pd->map, m, segs, &pd->nsegs, BUS_DMA_NOWAIT); if (rc == 0) { num_wqes = pd->nsegs + 1; if (IS_BE(sc) || IS_SH(sc)) { /*Dummy required only for BE3.*/ if (num_wqes & 1) num_wqes++; } if (num_wqes >= RING_NUM_FREE(wq->ring)) { bus_dmamap_unload(wq->tag, pd->map); return EBUSY; } atomic_store_rel_int(&wq->pkt_desc_head, (wq->pkt_desc_head + 1) % \ OCE_WQ_PACKET_ARRAY_SIZE); bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE); pd->mbuf = m; nichdr = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe); nichdr->u0.dw[0] = 0; nichdr->u0.dw[1] = 0; nichdr->u0.dw[2] = 0; nichdr->u0.dw[3] = 0; nichdr->u0.s.complete = complete; + nichdr->u0.s.mgmt = os2bmc; nichdr->u0.s.event = 1; nichdr->u0.s.crc = 1; nichdr->u0.s.forward = 0; nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0; nichdr->u0.s.udpcs = (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0; nichdr->u0.s.tcpcs = (m->m_pkthdr.csum_flags & CSUM_TCP) ? 
1 : 0; nichdr->u0.s.num_wqe = num_wqes; nichdr->u0.s.total_length = m->m_pkthdr.len; if (m->m_flags & M_VLANTAG) { nichdr->u0.s.vlan = 1; /*Vlan present*/ nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag; } if (m->m_pkthdr.csum_flags & CSUM_TSO) { if (m->m_pkthdr.tso_segsz) { nichdr->u0.s.lso = 1; nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz; } if (!IS_BE(sc) || !IS_SH(sc)) nichdr->u0.s.ipcs = 1; } RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); for (i = 0; i < pd->nsegs; i++) { nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_frag_wqe); nicfrag->u0.s.rsvd0 = 0; nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr); nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr); nicfrag->u0.s.frag_len = segs[i].ds_len; pd->wqe_idx = wq->ring->pidx; RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); } if (num_wqes > (pd->nsegs + 1)) { nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_frag_wqe); nicfrag->u0.dw[0] = 0; nicfrag->u0.dw[1] = 0; nicfrag->u0.dw[2] = 0; nicfrag->u0.dw[3] = 0; pd->wqe_idx = wq->ring->pidx; RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); pd->nsegs++; } if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); wq->tx_stats.tx_reqs++; wq->tx_stats.tx_wrbs += num_wqes; wq->tx_stats.tx_bytes += m->m_pkthdr.len; wq->tx_stats.tx_pkts++; bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); reg_value = (num_wqes << 16) | wq->wq_id; + + /* if os2bmc is not enabled or if the pkt is already tagged as + bmc, do nothing + */ + oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new); + OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value); } else if (rc == EFBIG) { if (retry_cnt == 0) { m_temp = m_defrag(m, M_NOWAIT); if (m_temp == NULL) goto free_ret; m = m_temp; *mpp = m_temp; retry_cnt = retry_cnt + 1; goto retry; } else goto free_ret; } else if (rc == ENOMEM) return rc; else goto free_ret; + + if (os2bmc) { + m = m_new; + goto tx_start; + } return 0; free_ret: m_freem(*mpp); *mpp = NULL; return rc; } static void -oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status) +oce_process_tx_completion(struct oce_wq *wq) { struct oce_packet_desc *pd; POCE_SOFTC sc = (POCE_SOFTC) wq->parent; struct mbuf *m; pd = &wq->pckts[wq->pkt_desc_tail]; atomic_store_rel_int(&wq->pkt_desc_tail, (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE); atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1); bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(wq->tag, pd->map); m = pd->mbuf; m_freem(m); pd->mbuf = NULL; if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) { if (wq->ring->num_used < (wq->ring->num_items / 2)) { sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE); oce_tx_restart(sc, wq); } } } static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq) { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING) return; #if __FreeBSD_version >= 800000 if (!drbr_empty(sc->ifp, wq->br)) #else if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd)) #endif taskqueue_enqueue(taskqueue_swi, &wq->txtask); } #if defined(INET6) || defined(INET) static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp) { struct mbuf *m; #ifdef INET struct ip *ip; #endif #ifdef INET6 struct ip6_hdr *ip6; #endif struct ether_vlan_header *eh; struct tcphdr *th; uint16_t etype; int total_len = 0, ehdrlen = 0; m = *mpp; if (M_WRITABLE(m) == 0) { m = m_dup(*mpp, M_NOWAIT); if (!m) return NULL; m_freem(*mpp); *mpp = m; } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = 
ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } switch (etype) { #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(m->m_data + ehdrlen); if (ip->ip_p != IPPROTO_TCP) return NULL; th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2); break; #endif #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen); if (ip6->ip6_nxt != IPPROTO_TCP) return NULL; th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2); break; #endif default: return NULL; } m = m_pullup(m, total_len); if (!m) return NULL; *mpp = m; return m; } #endif /* INET6 || INET */ void oce_tx_task(void *arg, int npending) { struct oce_wq *wq = arg; POCE_SOFTC sc = wq->parent; struct ifnet *ifp = sc->ifp; int rc = 0; #if __FreeBSD_version >= 800000 LOCK(&wq->tx_lock); rc = oce_multiq_transmit(ifp, NULL, wq); if (rc) { device_printf(sc->dev, "TX[%d] restart failed\n", wq->queue_index); } UNLOCK(&wq->tx_lock); #else oce_start(ifp); #endif } void oce_start(struct ifnet *ifp) { POCE_SOFTC sc = ifp->if_softc; struct mbuf *m; int rc = 0; int def_q = 0; /* Default tx queue is 0 */ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!sc->link_status) return; do { IF_DEQUEUE(&sc->ifp->if_snd, m); if (m == NULL) break; LOCK(&sc->wq[def_q]->tx_lock); rc = oce_tx(sc, &m, def_q); UNLOCK(&sc->wq[def_q]->tx_lock); if (rc) { if (m != NULL) { sc->wq[def_q]->tx_stats.tx_stops ++; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m); m = NULL; } break; } if (m != NULL) ETHER_BPF_MTAP(ifp, m); } while (TRUE); return; } /* Handle the Completion Queue for transmit */ uint16_t oce_wq_handler(void *arg) { struct oce_wq *wq = (struct oce_wq *)arg; POCE_SOFTC sc = wq->parent; struct oce_cq *cq = wq->cq; struct oce_nic_tx_cqe *cqe; int num_cqes = 0; + LOCK(&wq->tx_compl_lock); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); while (cqe->u0.dw[3]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe)); wq->ring->cidx = cqe->u0.s.wqe_index + 1; if (wq->ring->cidx >= wq->ring->num_items) wq->ring->cidx -= wq->ring->num_items; - oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status); + oce_process_tx_completion(wq); wq->tx_stats.tx_compl++; cqe->u0.dw[3] = 0; RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); num_cqes++; } if (num_cqes) oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); - - return 0; + + UNLOCK(&wq->tx_compl_lock); + return num_cqes; } static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq) { POCE_SOFTC sc = ifp->if_softc; int status = 0, queue_index = 0; struct mbuf *next = NULL; struct buf_ring *br = NULL; br = wq->br; queue_index = wq->queue_index; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { if (m != NULL) status = drbr_enqueue(ifp, br, m); return status; } if (m != NULL) { if ((status = drbr_enqueue(ifp, br, m)) != 0) return status; } while ((next = drbr_peek(ifp, br)) != NULL) { if (oce_tx(sc, &next, queue_index)) { if (next == NULL) { drbr_advance(ifp, br); } else { drbr_putback(ifp, br, next); wq->tx_stats.tx_stops ++; ifp->if_drv_flags |= IFF_DRV_OACTIVE; } break; }
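/* A successful oce_tx() consumed the mbuf returned by drbr_peek(), so the
 * peek is committed with drbr_advance() below; the failure path above either
 * advances past an mbuf that oce_tx() already freed (next == NULL) or puts
 * it back and raises IFF_DRV_OACTIVE so the tx task retries once completions
 * free ring space.
 */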
drbr_advance(ifp, br); if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len); if (next->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); ETHER_BPF_MTAP(ifp, next); } return 0; } /***************************************************************************** * Receive routines functions * *****************************************************************************/ static void -oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe) +oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2) { - uint32_t out; - struct oce_packet_desc *pd; + uint32_t *p; + struct ether_header *eh = NULL; + struct tcphdr *tcp_hdr = NULL; + struct ip *ip4_hdr = NULL; + struct ip6_hdr *ip6 = NULL; + uint32_t payload_len = 0; + + eh = mtod(m, struct ether_header *); + /* correct IP header */ + if(!cqe2->ipv6_frame) { + ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header)); + ip4_hdr->ip_ttl = cqe2->frame_lifespan; + ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header)); + tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip)); + }else { + ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header)); + ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan; + payload_len = cqe2->coalesced_size - sizeof(struct ether_header) + - sizeof(struct ip6_hdr); + ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len); + tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr)); + } + + /* correct tcp header */ + tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num); + if(cqe2->push) { + tcp_hdr->th_flags |= TH_PUSH; + } + tcp_hdr->th_win = htons(cqe2->tcp_window); + tcp_hdr->th_sum = 0xffff; + if(cqe2->ts_opt) { + p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2); + *p = cqe1->tcp_timestamp_val; + *(p+1) = cqe1->tcp_timestamp_ecr; + } + + return; +} + +static void +oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m) +{ POCE_SOFTC sc = (POCE_SOFTC) rq->parent; - int i, len, frag_len; - struct mbuf *m = NULL, *tail = NULL; - uint16_t vtag; + uint32_t i = 0, frag_len = 0; + uint32_t len = cqe_info->pkt_size; + struct oce_packet_desc *pd; + struct mbuf *tail = NULL; + for (i = 0; i < cqe_info->num_frags; i++) { + if (rq->ring->cidx == rq->ring->pidx) { + device_printf(sc->dev, + "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n"); + return; + } + pd = &rq->pckts[rq->ring->cidx]; + + bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(rq->tag, pd->map); + RING_GET(rq->ring, 1); + rq->pending--; + + frag_len = (len > rq->cfg.frag_size) ? 
rq->cfg.frag_size : len; + pd->mbuf->m_len = frag_len; + + if (tail != NULL) { + /* additional fragments */ + pd->mbuf->m_flags &= ~M_PKTHDR; + tail->m_next = pd->mbuf; + if(rq->islro) + tail->m_nextpkt = NULL; + tail = pd->mbuf; + } else { + /* first fragment, fill out much of the packet header */ + pd->mbuf->m_pkthdr.len = len; + if(rq->islro) + pd->mbuf->m_nextpkt = NULL; + pd->mbuf->m_pkthdr.csum_flags = 0; + if (IF_CSUM_ENABLED(sc)) { + if (cqe_info->l4_cksum_pass) { + if(!cqe_info->ipv6_frame) { /* IPV4 */ + pd->mbuf->m_pkthdr.csum_flags |= + (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); + }else { /* IPV6 frame */ + if(rq->islro) { + pd->mbuf->m_pkthdr.csum_flags |= + (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); + } + } + pd->mbuf->m_pkthdr.csum_data = 0xffff; + } + if (cqe_info->ip_cksum_pass) { + pd->mbuf->m_pkthdr.csum_flags |= + (CSUM_IP_CHECKED|CSUM_IP_VALID); + } + } + *m = tail = pd->mbuf; + } + pd->mbuf = NULL; + len -= frag_len; + } + + return; +} + +static void +oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2) +{ + POCE_SOFTC sc = (POCE_SOFTC) rq->parent; + struct nic_hwlro_cqe_part1 *cqe1 = NULL; + struct mbuf *m = NULL; + struct oce_common_cqe_info cq_info; + + /* parse cqe */ + if(cqe2 == NULL) { + cq_info.pkt_size = cqe->pkt_size; + cq_info.vtag = cqe->vlan_tag; + cq_info.l4_cksum_pass = cqe->l4_cksum_pass; + cq_info.ip_cksum_pass = cqe->ip_cksum_pass; + cq_info.ipv6_frame = cqe->ipv6_frame; + cq_info.vtp = cqe->vtp; + cq_info.qnq = cqe->qnq; + }else { + cqe1 = (struct nic_hwlro_cqe_part1 *)cqe; + cq_info.pkt_size = cqe2->coalesced_size; + cq_info.vtag = cqe2->vlan_tag; + cq_info.l4_cksum_pass = cqe2->l4_cksum_pass; + cq_info.ip_cksum_pass = cqe2->ip_cksum_pass; + cq_info.ipv6_frame = cqe2->ipv6_frame; + cq_info.vtp = cqe2->vtp; + cq_info.qnq = cqe1->qnq; + } + + cq_info.vtag = BSWAP_16(cq_info.vtag); + + cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size; + if(cq_info.pkt_size % rq->cfg.frag_size) + cq_info.num_frags++; + + oce_rx_mbuf_chain(rq, &cq_info, &m); + + if (m) { + if(cqe2) { + //assert(cqe2->valid != 0); + + //assert(cqe2->cqe_type != 2); + oce_correct_header(m, cqe1, cqe2); + } + + m->m_pkthdr.rcvif = sc->ifp; +#if __FreeBSD_version >= 800000 + if (rq->queue_index) + m->m_pkthdr.flowid = (rq->queue_index - 1); + else + m->m_pkthdr.flowid = rq->queue_index; + M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); +#endif + /* This determines if the vlan tag is valid */ + if (cq_info.vtp) { + if (sc->function_mode & FNM_FLEX10_MODE) { + /* FLEX10. If QnQ is not set, neglect VLAN */ + if (cq_info.qnq) { + m->m_pkthdr.ether_vtag = cq_info.vtag; + m->m_flags |= M_VLANTAG; + } + } else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK)) { + /* In UMC mode generally pvid will be stripped by + hw. But in some cases we have seen it comes + with pvid. So if pvid == vlan, neglect vlan.
+ */ + m->m_pkthdr.ether_vtag = cq_info.vtag; + m->m_flags |= M_VLANTAG; + } + } + if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); + + (*sc->ifp->if_input) (sc->ifp, m); + + /* Update rx stats per queue */ + rq->rx_stats.rx_pkts++; + rq->rx_stats.rx_bytes += cq_info.pkt_size; + rq->rx_stats.rx_frags += cq_info.num_frags; + rq->rx_stats.rx_ucast_pkts++; + } + return; +} + +static void +oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) +{ + POCE_SOFTC sc = (POCE_SOFTC) rq->parent; + int len; + struct mbuf *m = NULL; + struct oce_common_cqe_info cq_info; + uint16_t vtag = 0; + + /* Is it a flush compl that has no data */ + if(!cqe->u0.s.num_fragments) + goto exit; + len = cqe->u0.s.pkt_size; if (!len) { /*partial DMA workaround for Lancer*/ - oce_discard_rx_comp(rq, cqe); + oce_discard_rx_comp(rq, cqe->u0.s.num_fragments); goto exit; } + if (!oce_cqe_portid_valid(sc, cqe)) { + oce_discard_rx_comp(rq, cqe->u0.s.num_fragments); + goto exit; + } + /* Get vlan_tag value */ if(IS_BE(sc) || IS_SH(sc)) vtag = BSWAP_16(cqe->u0.s.vlan_tag); else vtag = cqe->u0.s.vlan_tag; + + cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass; + cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass; + cq_info.ipv6_frame = cqe->u0.s.ip_ver; + cq_info.num_frags = cqe->u0.s.num_fragments; + cq_info.pkt_size = cqe->u0.s.pkt_size; + oce_rx_mbuf_chain(rq, &cq_info, &m); - for (i = 0; i < cqe->u0.s.num_fragments; i++) { - - if (rq->packets_out == rq->packets_in) { - device_printf(sc->dev, - "RQ transmit descriptor missing\n"); - } - out = rq->packets_out + 1; - if (out == OCE_RQ_PACKET_ARRAY_SIZE) - out = 0; - pd = &rq->pckts[rq->packets_out]; - rq->packets_out = out; - - bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(rq->tag, pd->map); - rq->pending--; - - frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len; - pd->mbuf->m_len = frag_len; - - if (tail != NULL) { - /* additional fragments */ - pd->mbuf->m_flags &= ~M_PKTHDR; - tail->m_next = pd->mbuf; - tail = pd->mbuf; - } else { - /* first fragment, fill out much of the packet header */ - pd->mbuf->m_pkthdr.len = len; - pd->mbuf->m_pkthdr.csum_flags = 0; - if (IF_CSUM_ENABLED(sc)) { - if (cqe->u0.s.l4_cksum_pass) { - pd->mbuf->m_pkthdr.csum_flags |= - (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); - pd->mbuf->m_pkthdr.csum_data = 0xffff; - } - if (cqe->u0.s.ip_cksum_pass) { - if (!cqe->u0.s.ip_ver) { /* IPV4 */ - pd->mbuf->m_pkthdr.csum_flags |= - (CSUM_IP_CHECKED|CSUM_IP_VALID); - } - } - } - m = tail = pd->mbuf; - } - pd->mbuf = NULL; - len -= frag_len; - } - if (m) { - if (!oce_cqe_portid_valid(sc, cqe)) { - m_freem(m); - goto exit; - } - m->m_pkthdr.rcvif = sc->ifp; #if __FreeBSD_version >= 800000 if (rq->queue_index) m->m_pkthdr.flowid = (rq->queue_index - 1); else m->m_pkthdr.flowid = rq->queue_index; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); #endif /* This determines if the vlan tag is valid */ if (oce_cqe_vtp_valid(sc, cqe)) { if (sc->function_mode & FNM_FLEX10_MODE) { /* FLEX10. If QnQ is not set, neglect VLAN */ if (cqe->u0.s.qnq) { m->m_pkthdr.ether_vtag = vtag; m->m_flags |= M_VLANTAG; } } else if (sc->pvid != (vtag & VLAN_VID_MASK)) { /* In UMC mode generally pvid will be stripped by hw. But in some cases we have seen it comes with pvid. So if pvid == vlan, neglect vlan.
*/ m->m_pkthdr.ether_vtag = vtag; m->m_flags |= M_VLANTAG; } } if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); #if defined(INET6) || defined(INET) /* Try to queue to LRO */ if (IF_LRO_ENABLED(sc) && (cqe->u0.s.ip_cksum_pass) && (cqe->u0.s.l4_cksum_pass) && (!cqe->u0.s.ip_ver) && (rq->lro.lro_cnt != 0)) { if (tcp_lro_rx(&rq->lro, m, 0) == 0) { rq->lro_pkts_queued ++; goto post_done; } /* If LRO posting fails then try to post to STACK */ } #endif (*sc->ifp->if_input) (sc->ifp, m); #if defined(INET6) || defined(INET) post_done: #endif /* Update rx stats per queue */ rq->rx_stats.rx_pkts++; rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size; rq->rx_stats.rx_frags += cqe->u0.s.num_fragments; if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET) rq->rx_stats.rx_mcast_pkts++; if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET) rq->rx_stats.rx_ucast_pkts++; } exit: return; } -static void -oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) +void +oce_discard_rx_comp(struct oce_rq *rq, int num_frags) { - uint32_t out, i = 0; + uint32_t i = 0; struct oce_packet_desc *pd; POCE_SOFTC sc = (POCE_SOFTC) rq->parent; - int num_frags = cqe->u0.s.num_fragments; for (i = 0; i < num_frags; i++) { - if (rq->packets_out == rq->packets_in) { - device_printf(sc->dev, - "RQ transmit descriptor missing\n"); - } - out = rq->packets_out + 1; - if (out == OCE_RQ_PACKET_ARRAY_SIZE) - out = 0; - pd = &rq->pckts[rq->packets_out]; - rq->packets_out = out; + if (rq->ring->cidx == rq->ring->pidx) { + device_printf(sc->dev, + "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n"); + return; + } + pd = &rq->pckts[rq->ring->cidx]; + bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(rq->tag, pd->map); + if (pd->mbuf != NULL) { + m_freem(pd->mbuf); + pd->mbuf = NULL; + } - bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(rq->tag, pd->map); - rq->pending--; - m_freem(pd->mbuf); + RING_GET(rq->ring, 1); + rq->pending--; } - } static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) { struct oce_nic_rx_cqe_v1 *cqe_v1; int vtp = 0; if (sc->be3_native) { cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe; vtp = cqe_v1->u0.s.vlan_tag_present; } else vtp = cqe->u0.s.vlan_tag_present; return vtp; } static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) { struct oce_nic_rx_cqe_v1 *cqe_v1; int port_id = 0; if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) { cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe; port_id = cqe_v1->u0.s.port; if (sc->port_id != port_id) return 0; } else ;/* For BE3 legacy and Lancer this is dummy */ return 1; } #if defined(INET6) || defined(INET) -static void +void oce_rx_flush_lro(struct oce_rq *rq) { struct lro_ctrl *lro = &rq->lro; POCE_SOFTC sc = (POCE_SOFTC) rq->parent; if (!IF_LRO_ENABLED(sc)) return; tcp_lro_flush_all(lro); rq->lro_pkts_queued = 0; return; } static int oce_init_lro(POCE_SOFTC sc) { struct lro_ctrl *lro = NULL; int i = 0, rc = 0; for (i = 0; i < sc->nrqs; i++) { lro = &sc->rq[i]->lro; rc = tcp_lro_init(lro); if (rc != 0) { device_printf(sc->dev, "LRO init failed\n"); return rc; } lro->ifp = sc->ifp; } return rc; } void oce_free_lro(POCE_SOFTC sc) { struct lro_ctrl *lro = NULL; int i = 0; for (i = 0; i < sc->nrqs; i++) { lro = &sc->rq[i]->lro; if (lro) tcp_lro_free(lro); } } #endif int oce_alloc_rx_bufs(struct oce_rq *rq, int count) { POCE_SOFTC sc = (POCE_SOFTC) rq->parent; int i, in, rc; struct oce_packet_desc *pd; bus_dma_segment_t segs[6]; int nsegs, added = 0; struct oce_nic_rqe *rqe; pd_rxulp_db_t rxdb_reg; + 
uint32_t val = 0; + uint32_t oce_max_rq_posts = 64; bzero(&rxdb_reg, sizeof(pd_rxulp_db_t)); for (i = 0; i < count; i++) { - in = rq->packets_in + 1; - if (in == OCE_RQ_PACKET_ARRAY_SIZE) - in = 0; - if (in == rq->packets_out) - break; /* no more room */ + in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE; - pd = &rq->pckts[rq->packets_in]; - pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); - if (pd->mbuf == NULL) + pd = &rq->pckts[rq->ring->pidx]; + pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size); + if (pd->mbuf == NULL) { + device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size); break; + } + pd->mbuf->m_nextpkt = NULL; - pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES; + pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size; + rc = bus_dmamap_load_mbuf_sg(rq->tag, pd->map, pd->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (rc) { m_free(pd->mbuf); + device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc); break; } if (nsegs != 1) { i--; continue; } - rq->packets_in = in; bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD); rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe); rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr); rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr); DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe)); RING_PUT(rq->ring, 1); added++; rq->pending++; } + oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS; if (added != 0) { - for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) { - rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS; + for (i = added / oce_max_rq_posts; i > 0; i--) { + rxdb_reg.bits.num_posted = oce_max_rq_posts; rxdb_reg.bits.qid = rq->rq_id; - OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0); - added -= OCE_MAX_RQ_POSTS; + if(rq->islro) { + val |= rq->rq_id & DB_LRO_RQ_ID_MASK; + val |= oce_max_rq_posts << 16; + OCE_WRITE_REG32(sc, db, DB_OFFSET, val); + }else { + OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0); + } + added -= oce_max_rq_posts; } if (added > 0) { rxdb_reg.bits.qid = rq->rq_id; rxdb_reg.bits.num_posted = added; - OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0); + if(rq->islro) { + val |= rq->rq_id & DB_LRO_RQ_ID_MASK; + val |= added << 16; + OCE_WRITE_REG32(sc, db, DB_OFFSET, val); + }else { + OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0); + } } } return 0; } +static void +oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq) +{ + if (num_cqes) { + oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE); + if(!sc->enable_hwlro) { + if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1) + oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1)); + }else { + if ((OCE_RQ_PACKET_ARRAY_SIZE -1 - rq->pending) > 64) + oce_alloc_rx_bufs(rq, 64); + } + } + return; +} + +uint16_t +oce_rq_handler_lro(void *arg) +{ + struct oce_rq *rq = (struct oce_rq *)arg; + struct oce_cq *cq = rq->cq; + POCE_SOFTC sc = rq->parent; + struct nic_hwlro_singleton_cqe *cqe; + struct nic_hwlro_cqe_part2 *cqe2; + int num_cqes = 0; + + LOCK(&rq->rx_lock); + bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); + cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe); + while (cqe->valid) { + if(cqe->cqe_type == 0) { /* singleton cqe */ + /* we should not get singleton cqe after cqe1 on same rq */ + if(rq->cqe_firstpart != NULL) { + device_printf(sc->dev, "Got singleton cqe after cqe1 \n"); + goto exit_rq_handler_lro; + } + if(cqe->error != 0) { + rq->rx_stats.rxcp_err++; + if_inc_counter(sc->ifp, 
IFCOUNTER_IERRORS, 1); + } + oce_rx_lro(rq, cqe, NULL); + rq->rx_stats.rx_compl++; + cqe->valid = 0; + RING_GET(cq->ring, 1); + num_cqes++; + if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled)) + break; + }else if(cqe->cqe_type == 0x1) { /* first part */ + /* we should not get cqe1 after cqe1 on same rq */ + if(rq->cqe_firstpart != NULL) { + device_printf(sc->dev, "Got cqe1 after cqe1 \n"); + goto exit_rq_handler_lro; + } + rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe; + RING_GET(cq->ring, 1); + }else if(cqe->cqe_type == 0x2) { /* second part */ + cqe2 = (struct nic_hwlro_cqe_part2 *)cqe; + if(cqe2->error != 0) { + rq->rx_stats.rxcp_err++; + if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); + } + /* We should not get cqe2 without cqe1 */ + if(rq->cqe_firstpart == NULL) { + device_printf(sc->dev, "Got cqe2 without cqe1 \n"); + goto exit_rq_handler_lro; + } + oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2); + + rq->rx_stats.rx_compl++; + rq->cqe_firstpart->valid = 0; + cqe2->valid = 0; + rq->cqe_firstpart = NULL; + + RING_GET(cq->ring, 1); + num_cqes += 2; + if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled)) + break; + } + + bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); + cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe); + } + oce_check_rx_bufs(sc, num_cqes, rq); +exit_rq_handler_lro: + UNLOCK(&rq->rx_lock); + return 0; +} + /* Handle the Completion Queue for receive */ uint16_t oce_rq_handler(void *arg) { struct oce_rq *rq = (struct oce_rq *)arg; struct oce_cq *cq = rq->cq; POCE_SOFTC sc = rq->parent; struct oce_nic_rx_cqe *cqe; - int num_cqes = 0, rq_buffers_used = 0; + int num_cqes = 0; - + if(rq->islro) { + oce_rq_handler_lro(arg); + return 0; + } + LOCK(&rq->rx_lock); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); while (cqe->u0.dw[2]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe)); - RING_GET(rq->ring, 1); if (cqe->u0.s.error == 0) { - oce_rx(rq, cqe->u0.s.frag_index, cqe); + oce_rx(rq, cqe); } else { rq->rx_stats.rxcp_err++; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); /* Post L3/L4 errors to stack.*/ - oce_rx(rq, cqe->u0.s.frag_index, cqe); + oce_rx(rq, cqe); } rq->rx_stats.rx_compl++; cqe->u0.dw[2] = 0; #if defined(INET6) || defined(INET) if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) { oce_rx_flush_lro(rq); } #endif RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); num_cqes++; if (num_cqes >= (IS_XE201(sc) ? 
8 : oce_max_rsp_handled)) break; } #if defined(INET6) || defined(INET) - if (IF_LRO_ENABLED(sc)) - oce_rx_flush_lro(rq); + if (IF_LRO_ENABLED(sc)) + oce_rx_flush_lro(rq); #endif - - if (num_cqes) { - oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); - rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending; - if (rq_buffers_used > 1) - oce_alloc_rx_bufs(rq, (rq_buffers_used - 1)); - } + oce_check_rx_bufs(sc, num_cqes, rq); + UNLOCK(&rq->rx_lock); return 0; } /***************************************************************************** * Helper function prototypes in this file * *****************************************************************************/ static int oce_attach_ifp(POCE_SOFTC sc) { sc->ifp = if_alloc(IFT_ETHER); if (!sc->ifp) return ENOMEM; ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status); ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST; sc->ifp->if_ioctl = oce_ioctl; sc->ifp->if_start = oce_start; sc->ifp->if_init = oce_init; sc->ifp->if_mtu = ETHERMTU; sc->ifp->if_softc = sc; #if __FreeBSD_version >= 800000 sc->ifp->if_transmit = oce_multiq_start; sc->ifp->if_qflush = oce_multiq_flush; #endif if_initname(sc->ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1; IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&sc->ifp->if_snd); sc->ifp->if_hwassist = OCE_IF_HWASSIST; sc->ifp->if_hwassist |= CSUM_TSO; sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP); sc->ifp->if_capabilities = OCE_IF_CAPABILITIES; sc->ifp->if_capabilities |= IFCAP_HWCSUM; sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; #if defined(INET6) || defined(INET) sc->ifp->if_capabilities |= IFCAP_TSO; sc->ifp->if_capabilities |= IFCAP_LRO; sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO; #endif sc->ifp->if_capenable = sc->ifp->if_capabilities; sc->ifp->if_baudrate = IF_Gbps(10); #if __FreeBSD_version >= 1000000 sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS; sc->ifp->if_hw_tsomaxsegsize = 4096; #endif ether_ifattach(sc->ifp, sc->macaddr.mac_addr); return 0; } static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag) { POCE_SOFTC sc = ifp->if_softc; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) return; sc->vlan_tag[vtag] = 1; sc->vlans_added++; if (sc->vlans_added <= (sc->max_vlans + 1)) oce_vid_config(sc); } static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag) { POCE_SOFTC sc = ifp->if_softc; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) return; sc->vlan_tag[vtag] = 0; sc->vlans_added--; oce_vid_config(sc); } /* * A max of 64 vlans can be configured in BE. If the user configures * more, place the card in vlan promiscuous mode. 
*/ static int oce_vid_config(POCE_SOFTC sc) { struct normal_vlan vtags[MAX_VLANFILTER_SIZE]; uint16_t ntags = 0, i; int status = 0; if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) && (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) { for (i = 0; i < MAX_VLANS; i++) { if (sc->vlan_tag[i]) { vtags[ntags].vtag = i; ntags++; } } if (ntags) status = oce_config_vlan(sc, (uint8_t) sc->if_id, vtags, ntags, 1, 0); } else status = oce_config_vlan(sc, (uint8_t) sc->if_id, NULL, 0, 1, 1); return status; } static void oce_mac_addr_set(POCE_SOFTC sc) { uint32_t old_pmac_id = sc->pmac_id; int status = 0; status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr, sc->macaddr.size_of_struct); if (!status) return; status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)), sc->if_id, &sc->pmac_id); if (!status) { status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id); bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr, sc->macaddr.size_of_struct); } if (status) device_printf(sc->dev, "Failed update macaddress\n"); } static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data) { POCE_SOFTC sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int rc = ENXIO; char cookie[32] = {0}; void *priv_data = ifr_data_get_ptr(ifr); void *ioctl_ptr; uint32_t req_size; struct mbx_hdr req; OCE_DMA_MEM dma_mem; struct mbx_common_get_cntl_attr *fw_cmd; if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE))) return EFAULT; if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE))) return EINVAL; ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE); if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr))) return EFAULT; req_size = le32toh(req.u0.req.request_length); if (req_size > 65536) return EINVAL; req_size += sizeof(struct mbx_hdr); rc = oce_dma_alloc(sc, req_size, &dma_mem, 0); if (rc) return ENOMEM; if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) { rc = EFAULT; goto dma_free; } rc = oce_pass_through_mbox(sc, &dma_mem, req_size); if (rc) { rc = EIO; goto dma_free; } if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size)) rc = EFAULT; /* firmware is filling all the attributes for this ioctl except the driver version..so fill it */ if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) { fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr; strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str, COMPONENT_REVISION, strlen(COMPONENT_REVISION)); } dma_free: oce_dma_free(sc, &dma_mem); return rc; } static void oce_eqd_set_periodic(POCE_SOFTC sc) { struct oce_set_eqd set_eqd[OCE_MAX_EQ]; struct oce_aic_obj *aic; struct oce_eq *eqo; uint64_t now = 0, delta; int eqd, i, num = 0; - uint32_t ips = 0; - int tps; + uint32_t tx_reqs = 0, rxpkts = 0, pps; + struct oce_wq *wq; + struct oce_rq *rq; + #define ticks_to_msecs(t) (1000 * (t) / hz) + for (i = 0 ; i < sc->neqs; i++) { eqo = sc->eq[i]; aic = &sc->aic_obj[i]; /* When setting the static eq delay from the user space */ if (!aic->enable) { + if (aic->ticks) + aic->ticks = 0; eqd = aic->et_eqd; goto modify_eqd; } + rq = sc->rq[i]; + rxpkts = rq->rx_stats.rx_pkts; + wq = sc->wq[i]; + tx_reqs = wq->tx_stats.tx_reqs; now = ticks; - /* Over flow check */ - if ((now < aic->ticks) || (eqo->intr < aic->intr_prev)) - goto done; + if (!aic->ticks || now < aic->ticks || + rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) { + aic->prev_rxpkts = rxpkts; + aic->prev_txreqs = tx_reqs; + aic->ticks = now; + continue; + } - delta = now - aic->ticks; - tps = delta/hz; + delta = ticks_to_msecs(now - aic->ticks); - /* Interrupt rate based on elapsed ticks 
*/ - if(tps) - ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps; - - if (ips > INTR_RATE_HWM) - eqd = aic->cur_eqd + 20; - else if (ips < INTR_RATE_LWM) - eqd = aic->cur_eqd / 2; - else - goto done; - - if (eqd < 10) + pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) + + (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta); + eqd = (pps / 15000) << 2; + if (eqd < 8) eqd = 0; /* Make sure that the eq delay is in the known range */ eqd = min(eqd, aic->max_eqd); eqd = max(eqd, aic->min_eqd); + aic->prev_rxpkts = rxpkts; + aic->prev_txreqs = tx_reqs; + aic->ticks = now; + modify_eqd: if (eqd != aic->cur_eqd) { set_eqd[num].delay_multiplier = (eqd * 65)/100; set_eqd[num].eq_id = eqo->eq_id; aic->cur_eqd = eqd; num++; } -done: - aic->intr_prev = eqo->intr; - aic->ticks = now; } /* Is there at least one eq that needs to be modified? */ - if(num) - oce_mbox_eqd_modify_periodic(sc, set_eqd, num); + for(i = 0; i < num; i += 8) { + if((num - i) >=8 ) + oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8); + else + oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i)); + } + } static void oce_detect_hw_error(POCE_SOFTC sc) { uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0; uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; uint32_t i; if (sc->hw_error) return; if (IS_XE201(sc)) { sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET); sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET); } } else { ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW); ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH); ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK); ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK); ue_low = (ue_low & ~ue_low_mask); ue_high = (ue_high & ~ue_high_mask); } /* On certain platforms BE hardware can indicate spurious UEs. * Allow the h/w to stop working completely in case of a real UE. * Hence not setting the hw_error for UE detection.
*/ if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sc->hw_error = TRUE; device_printf(sc->dev, "Error detected in the card\n"); } if (sliport_status & SLIPORT_STATUS_ERR_MASK) { device_printf(sc->dev, "ERR: sliport status 0x%x\n", sliport_status); device_printf(sc->dev, "ERR: sliport error1 0x%x\n", sliport_err1); device_printf(sc->dev, "ERR: sliport error2 0x%x\n", sliport_err2); } if (ue_low) { for (i = 0; ue_low; ue_low >>= 1, i++) { if (ue_low & 1) device_printf(sc->dev, "UE: %s bit set\n", ue_status_low_desc[i]); } } if (ue_high) { for (i = 0; ue_high; ue_high >>= 1, i++) { if (ue_high & 1) device_printf(sc->dev, "UE: %s bit set\n", ue_status_hi_desc[i]); } } } static void oce_local_timer(void *arg) { POCE_SOFTC sc = arg; int i = 0; oce_detect_hw_error(sc); oce_refresh_nic_stats(sc); oce_refresh_queue_stats(sc); oce_mac_addr_set(sc); /* TX Watch Dog*/ for (i = 0; i < sc->nwqs; i++) oce_tx_restart(sc, sc->wq[i]); /* calculate and set the eq delay for optimal interrupt rate */ if (IS_BE(sc) || IS_SH(sc)) oce_eqd_set_periodic(sc); callout_reset(&sc->timer, hz, oce_local_timer, sc); } +static void +oce_tx_compl_clean(POCE_SOFTC sc) +{ + struct oce_wq *wq; + int i = 0, timeo = 0, num_wqes = 0; + int pending_txqs = sc->nwqs; + /* Stop polling for compls when HW has been silent for 10ms or + * hw_error or no outstanding completions expected + */ + do { + pending_txqs = sc->nwqs; + + for_all_wq_queues(sc, wq, i) { + num_wqes = oce_wq_handler(wq); + + if(num_wqes) + timeo = 0; + + if(!wq->ring->num_used) + pending_txqs--; + } + + if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error) + break; + + DELAY(1000); + } while (TRUE); + + for_all_wq_queues(sc, wq, i) { + while(wq->ring->num_used) { + LOCK(&wq->tx_compl_lock); + oce_process_tx_completion(wq); + UNLOCK(&wq->tx_compl_lock); + } + } + +} + /* NOTE : This should only be called holding * DEVICE_LOCK. */ static void oce_if_deactivate(POCE_SOFTC sc) { - int i, mtime = 0; - int wait_req = 0; + int i; struct oce_rq *rq; struct oce_wq *wq; struct oce_eq *eq; sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); - /*Wait for max of 400ms for TX completions to be done */ - while (mtime < 400) { - wait_req = 0; - for_all_wq_queues(sc, wq, i) { - if (wq->ring->num_used) { - wait_req = 1; - DELAY(1); - break; - } - } - mtime += 1; - if (!wait_req) - break; - } + oce_tx_compl_clean(sc); /* Stop intrs and finish any bottom halves pending */ oce_hw_intr_disable(sc); /* Since taskqueue_drain takes a Gaint Lock, We should not acquire any other lock. So unlock device lock and require after completing taskqueue_drain. */ UNLOCK(&sc->dev_lock); for (i = 0; i < sc->intr_count; i++) { if (sc->intrs[i].tq != NULL) { taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task); } } LOCK(&sc->dev_lock); /* Delete RX queue in card with flush param */ oce_stop_rx(sc); /* Invalidate any pending cq and eq entries*/ for_all_evnt_queues(sc, eq, i) oce_drain_eq(eq); for_all_rq_queues(sc, rq, i) oce_drain_rq_cq(rq); for_all_wq_queues(sc, wq, i) oce_drain_wq_cq(wq); /* But still we need to get MCC aync events. 
So enable intrs and also arm first EQ */ oce_hw_intr_enable(sc); oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE); DELAY(10); } static void oce_if_activate(POCE_SOFTC sc) { struct oce_eq *eq; struct oce_rq *rq; struct oce_wq *wq; int i, rc = 0; sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; oce_hw_intr_disable(sc); oce_start_rx(sc); for_all_rq_queues(sc, rq, i) { rc = oce_start_rq(rq); if (rc) device_printf(sc->dev, "Unable to start RX\n"); } for_all_wq_queues(sc, wq, i) { rc = oce_start_wq(wq); if (rc) device_printf(sc->dev, "Unable to start TX\n"); } for_all_evnt_queues(sc, eq, i) oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE); oce_hw_intr_enable(sc); } static void process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe) { /* Update Link status */ if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) == ASYNC_EVENT_LINK_UP) { sc->link_status = ASYNC_EVENT_LINK_UP; if_link_state_change(sc->ifp, LINK_STATE_UP); } else { sc->link_status = ASYNC_EVENT_LINK_DOWN; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } } +static void oce_async_grp5_osbmc_process(POCE_SOFTC sc, + struct oce_async_evt_grp5_os2bmc *evt) +{ + DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc)); + if (evt->u.s.mgmt_enable) + sc->flags |= OCE_FLAGS_OS2BMC; + else + return; + + sc->bmc_filt_mask = evt->u.s.arp_filter; + sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1); + sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2); + sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3); + sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4); + sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5); + sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6); + sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7); + sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8); +} + + +static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe) +{ + struct oce_async_event_grp5_pvid_state *gcqe; + struct oce_async_evt_grp5_os2bmc *bmccqe; + + switch (cqe->u0.s.async_type) { + case ASYNC_EVENT_PVID_STATE: + /* GRP5 PVID */ + gcqe = (struct oce_async_event_grp5_pvid_state *)cqe; + if (gcqe->enabled) + sc->pvid = gcqe->tag & VLAN_VID_MASK; + else + sc->pvid = 0; + break; + case ASYNC_EVENT_OS2BMC: + bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe; + oce_async_grp5_osbmc_process(sc, bmccqe); + break; + default: + break; + } +} + /* Handle the Completion Queue for the Mailbox/Async notifications */ uint16_t oce_mq_handler(void *arg) { struct oce_mq *mq = (struct oce_mq *)arg; POCE_SOFTC sc = mq->parent; struct oce_cq *cq = mq->cq; int num_cqes = 0, evt_type = 0, optype = 0; struct oce_mq_cqe *cqe; struct oce_async_cqe_link_state *acqe; - struct oce_async_event_grp5_pvid_state *gcqe; struct oce_async_event_qnq *dbgcqe; bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); while (cqe->u0.dw[3]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe)); if (cqe->u0.s.async_event) { evt_type = cqe->u0.s.event_type; optype = cqe->u0.s.async_type; if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) { /* Link status evt */ acqe = (struct oce_async_cqe_link_state *)cqe; process_link_state(sc, acqe); - } else if ((evt_type == ASYNC_EVENT_GRP5) && - (optype == ASYNC_EVENT_PVID_STATE)) { - /* GRP5 PVID */ - gcqe = - (struct oce_async_event_grp5_pvid_state *)cqe; - if (gcqe->enabled) - sc->pvid = gcqe->tag & VLAN_VID_MASK; - else - sc->pvid = 0; - - } - else if(evt_type == ASYNC_EVENT_CODE_DEBUG && - optype == ASYNC_EVENT_DEBUG_QNQ) { - dbgcqe = - (struct oce_async_event_qnq *)cqe; + } 
else if (evt_type == ASYNC_EVENT_GRP5) { + oce_process_grp5_events(sc, cqe); + } else if (evt_type == ASYNC_EVENT_CODE_DEBUG && + optype == ASYNC_EVENT_DEBUG_QNQ) { + dbgcqe = (struct oce_async_event_qnq *)cqe; if(dbgcqe->valid) sc->qnqid = dbgcqe->vlan_tag; sc->qnq_debug_event = TRUE; } } cqe->u0.dw[3] = 0; RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); num_cqes++; } if (num_cqes) oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); return 0; } static void setup_max_queues_want(POCE_SOFTC sc) { /* Check if it is FLEX machine. Is so dont use RSS */ if ((sc->function_mode & FNM_FLEX10_MODE) || (sc->function_mode & FNM_UMC_MODE) || (sc->function_mode & FNM_VNIC_MODE) || (!is_rss_enabled(sc)) || IS_BE2(sc)) { sc->nrqs = 1; sc->nwqs = 1; } else { sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1; sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs); } if (IS_BE2(sc) && is_rss_enabled(sc)) sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1; } static void update_queues_got(POCE_SOFTC sc) { if (is_rss_enabled(sc)) { sc->nrqs = sc->intr_count + 1; sc->nwqs = sc->intr_count; } else { sc->nrqs = 1; sc->nwqs = 1; } if (IS_BE2(sc)) sc->nwqs = 1; } static int oce_check_ipv6_ext_hdr(struct mbuf *m) { struct ether_header *eh = mtod(m, struct ether_header *); caddr_t m_datatemp = m->m_data; if (eh->ether_type == htons(ETHERTYPE_IPV6)) { m->m_data += sizeof(struct ether_header); struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); if((ip6->ip6_nxt != IPPROTO_TCP) && \ (ip6->ip6_nxt != IPPROTO_UDP)){ struct ip6_ext *ip6e = NULL; m->m_data += sizeof(struct ip6_hdr); ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *); if(ip6e->ip6e_len == 0xff) { m->m_data = m_datatemp; return TRUE; } } m->m_data = m_datatemp; } return FALSE; } static int is_be3_a1(POCE_SOFTC sc) { if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) { return TRUE; } return FALSE; } static struct mbuf * oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete) { uint16_t vlan_tag = 0; if(!M_WRITABLE(m)) return NULL; /* Embed vlan tag in the packet if it is not part of it */ if(m->m_flags & M_VLANTAG) { vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag); m->m_flags &= ~M_VLANTAG; } /* if UMC, ignore vlan tag insertion and instead insert pvid */ if(sc->pvid) { if(!vlan_tag) vlan_tag = sc->pvid; - *complete = FALSE; + if (complete) + *complete = FALSE; } if(vlan_tag) { m = ether_vlanencap(m, vlan_tag); } if(sc->qnqid) { m = ether_vlanencap(m, sc->qnqid); - *complete = FALSE; + + if (complete) + *complete = FALSE; } return m; } static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m) { if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \ oce_check_ipv6_ext_hdr(m)) { return TRUE; } return FALSE; } static void oce_get_config(POCE_SOFTC sc) { int rc = 0; uint32_t max_rss = 0; if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native)) max_rss = OCE_LEGACY_MODE_RSS; else max_rss = OCE_MAX_RSS; if (!IS_BE(sc)) { rc = oce_get_profile_config(sc, max_rss); if (rc) { sc->nwqs = OCE_MAX_WQ; sc->nrssqs = max_rss; sc->nrqs = sc->nrssqs + 1; } } else { /* For BE3 don't rely on fw for determining the resources */ sc->nrssqs = max_rss; sc->nrqs = sc->nrssqs + 1; sc->nwqs = OCE_MAX_WQ; sc->max_vlans = MAX_VLANFILTER_SIZE; } +} + +static void +oce_rdma_close(void) +{ + if (oce_rdma_if != NULL) { + oce_rdma_if = NULL; + } +} + +static void +oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr) +{ + memcpy(macaddr, sc->macaddr.mac_addr, 6); +} + +int 
+oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if) +{ + POCE_SOFTC sc; + struct oce_dev_info di; + int i; + + if ((rdma_info == NULL) || (rdma_if == NULL)) { + return -EINVAL; + } + + if ((rdma_info->size != OCE_RDMA_INFO_SIZE) || + (rdma_if->size != OCE_RDMA_IF_SIZE)) { + return -ENXIO; + } + + rdma_info->close = oce_rdma_close; + rdma_info->mbox_post = oce_mbox_post; + rdma_info->common_req_hdr_init = mbx_common_req_hdr_init; + rdma_info->get_mac_addr = oce_get_mac_addr; + + oce_rdma_if = rdma_if; + + sc = softc_head; + while (sc != NULL) { + if (oce_rdma_if->announce != NULL) { + memset(&di, 0, sizeof(di)); + di.dev = sc->dev; + di.softc = sc; + di.ifp = sc->ifp; + di.db_bhandle = sc->db_bhandle; + di.db_btag = sc->db_btag; + di.db_page_size = 4096; + if (sc->flags & OCE_FLAGS_USING_MSIX) { + di.intr_mode = OCE_INTERRUPT_MODE_MSIX; + } else if (sc->flags & OCE_FLAGS_USING_MSI) { + di.intr_mode = OCE_INTERRUPT_MODE_MSI; + } else { + di.intr_mode = OCE_INTERRUPT_MODE_INTX; + } + di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk + if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) { + di.msix.num_vectors = sc->intr_count + sc->roce_intr_count; + di.msix.start_vector = sc->intr_count; + for (i=0; i<di.msix.num_vectors; i++) { + di.msix.vector_list[i] = sc->intrs[i].vector; + } + } else { + } + memcpy(di.mac_addr, sc->macaddr.mac_addr, 6); + di.vendor_id = pci_get_vendor(sc->dev); + di.dev_id = pci_get_device(sc->dev); + + if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) { + di.flags |= OCE_RDMA_INFO_RDMA_SUPPORTED; + } + + rdma_if->announce(&di); + sc = sc->next; + } + } + + return 0; +} + +static void +oce_read_env_variables( POCE_SOFTC sc ) +{ + char *value = NULL; + int rc = 0; + + /* read if user wants to enable hwlro or swlro */ + //value = getenv("oce_enable_hwlro"); + if(value && IS_SH(sc)) { + sc->enable_hwlro = strtol(value, NULL, 10); + if(sc->enable_hwlro) { + rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL); + if(rc) { + device_printf(sc->dev, "no hardware lro support\n"); + device_printf(sc->dev, "software lro enabled\n"); + sc->enable_hwlro = 0; + }else { + device_printf(sc->dev, "hardware lro enabled\n"); + oce_max_rsp_handled = 32; + } + }else { + device_printf(sc->dev, "software lro enabled\n"); + } + }else { + sc->enable_hwlro = 0; + } + + /* read mbuf size */ + //value = getenv("oce_rq_buf_size"); + if(value && IS_SH(sc)) { + oce_rq_buf_size = strtol(value, NULL, 10); + switch(oce_rq_buf_size) { + case 2048: + case 4096: + case 9216: + case 16384: + break; + + default: + device_printf(sc->dev, " Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K \n"); + oce_rq_buf_size = 2048; + } + } + + return; } Index: stable/11/sys/dev/oce/oce_if.h =================================================================== --- stable/11/sys/dev/oce/oce_if.h (revision 338937) +++ stable/11/sys/dev/oce/oce_if.h (revision 338938) @@ -1,1161 +1,1265 @@ /*- * Copyright (C) 2013 Emulex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3.
Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include "oce_hw.h" /* OCE device driver module component revision information */ -#define COMPONENT_REVISION "10.0.664.0" +#define COMPONENT_REVISION "11.0.50.0" /* OCE devices supported by this driver */ #define PCI_VENDOR_EMULEX 0x10df /* Emulex */ #define PCI_VENDOR_SERVERENGINES 0x19a2 /* ServerEngines (BE) */ #define PCI_PRODUCT_BE2 0x0700 /* BE2 network adapter */ #define PCI_PRODUCT_BE3 0x0710 /* BE3 network adapter */ #define PCI_PRODUCT_XE201 0xe220 /* XE201 network adapter */ #define PCI_PRODUCT_XE201_VF 0xe228 /* XE201 with VF in Lancer */ #define PCI_PRODUCT_SH 0x0720 /* Skyhawk network adapter */ #define IS_BE(sc) (((sc->flags & OCE_FLAGS_BE3) | \ (sc->flags & OCE_FLAGS_BE2))? 1:0) #define IS_BE3(sc) (sc->flags & OCE_FLAGS_BE3) #define IS_BE2(sc) (sc->flags & OCE_FLAGS_BE2) #define IS_XE201(sc) ((sc->flags & OCE_FLAGS_XE201) ? 1:0) #define HAS_A0_CHIP(sc) ((sc->flags & OCE_FLAGS_HAS_A0_CHIP) ? 1:0) #define IS_SH(sc) ((sc->flags & OCE_FLAGS_SH) ? 1 : 0) #define is_be_mode_mc(sc) ((sc->function_mode & FNM_FLEX10_MODE) || \ (sc->function_mode & FNM_UMC_MODE) || \ (sc->function_mode & FNM_VNIC_MODE)) #define OCE_FUNCTION_CAPS_SUPER_NIC 0x40 #define IS_PROFILE_SUPER_NIC(sc) (sc->function_caps & OCE_FUNCTION_CAPS_SUPER_NIC) /* proportion Service Level Interface queues */ #define OCE_MAX_UNITS 2 #define OCE_MAX_PPORT OCE_MAX_UNITS #define OCE_MAX_VPORT OCE_MAX_UNITS extern int mp_ncpus; /* system's total active cpu cores */ #define OCE_NCPUS mp_ncpus /* This should be powers of 2.
Like 2,4,8 & 16 */ #define OCE_MAX_RSS 8 #define OCE_LEGACY_MODE_RSS 4 /* For BE3 Legacy mode*/ #define is_rss_enabled(sc) ((sc->function_caps & FNC_RSS) && !is_be_mode_mc(sc)) #define OCE_MIN_RQ 1 #define OCE_MIN_WQ 1 #define OCE_MAX_RQ OCE_MAX_RSS + 1 /* one default queue */ #define OCE_MAX_WQ 8 #define OCE_MAX_EQ 32 #define OCE_MAX_CQ OCE_MAX_RQ + OCE_MAX_WQ + 1 /* one MCC queue */ #define OCE_MAX_CQ_EQ 8 /* Max CQ that can attached to an EQ */ #define OCE_DEFAULT_WQ_EQD 16 #define OCE_MAX_PACKET_Q 16 -#define OCE_RQ_BUF_SIZE 2048 #define OCE_LSO_MAX_SIZE (64 * 1024) #define LONG_TIMEOUT 30 #define OCE_MAX_JUMBO_FRAME_SIZE 9018 #define OCE_MAX_MTU (OCE_MAX_JUMBO_FRAME_SIZE - \ ETHER_VLAN_ENCAP_LEN - \ ETHER_HDR_LEN) +#define OCE_RDMA_VECTORS 2 + #define OCE_MAX_TX_ELEMENTS 29 #define OCE_MAX_TX_DESC 1024 #define OCE_MAX_TX_SIZE 65535 +#define OCE_MAX_TSO_SIZE (65535 - ETHER_HDR_LEN) #define OCE_MAX_RX_SIZE 4096 #define OCE_MAX_RQ_POSTS 255 +#define OCE_HWLRO_MAX_RQ_POSTS 64 #define OCE_DEFAULT_PROMISCUOUS 0 #define RSS_ENABLE_IPV4 0x1 #define RSS_ENABLE_TCP_IPV4 0x2 #define RSS_ENABLE_IPV6 0x4 #define RSS_ENABLE_TCP_IPV6 0x8 #define INDIRECTION_TABLE_ENTRIES 128 /* flow control definitions */ #define OCE_FC_NONE 0x00000000 #define OCE_FC_TX 0x00000001 #define OCE_FC_RX 0x00000002 #define OCE_DEFAULT_FLOW_CONTROL (OCE_FC_TX | OCE_FC_RX) /* Interface capabilities to give device when creating interface */ #define OCE_CAPAB_FLAGS (MBX_RX_IFACE_FLAGS_BROADCAST | \ MBX_RX_IFACE_FLAGS_UNTAGGED | \ MBX_RX_IFACE_FLAGS_PROMISCUOUS | \ MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS | \ MBX_RX_IFACE_FLAGS_MCAST_PROMISCUOUS | \ MBX_RX_IFACE_FLAGS_RSS | \ MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR) /* Interface capabilities to enable by default (others set dynamically) */ #define OCE_CAPAB_ENABLE (MBX_RX_IFACE_FLAGS_BROADCAST | \ MBX_RX_IFACE_FLAGS_UNTAGGED | \ MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR) #define OCE_IF_HWASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP) #define OCE_IF_CAPABILITIES (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \ IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | \ IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU) #define OCE_IF_HWASSIST_NONE 0 #define OCE_IF_CAPABILITIES_NONE 0 #define ETH_ADDR_LEN 6 #define MAX_VLANFILTER_SIZE 64 #define MAX_VLANS 4096 #define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16)) #define BSWAP_8(x) ((x) & 0xff) #define BSWAP_16(x) ((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8)) #define BSWAP_32(x) ((BSWAP_16(x) << 16) | \ BSWAP_16((x) >> 16)) #define BSWAP_64(x) ((BSWAP_32(x) << 32) | \ BSWAP_32((x) >> 32)) #define for_all_wq_queues(sc, wq, i) \ for (i = 0, wq = sc->wq[0]; i < sc->nwqs; i++, wq = sc->wq[i]) #define for_all_rq_queues(sc, rq, i) \ for (i = 0, rq = sc->rq[0]; i < sc->nrqs; i++, rq = sc->rq[i]) #define for_all_rss_queues(sc, rq, i) \ for (i = 0, rq = sc->rq[i + 1]; i < (sc->nrqs - 1); \ i++, rq = sc->rq[i + 1]) #define for_all_evnt_queues(sc, eq, i) \ for (i = 0, eq = sc->eq[0]; i < sc->neqs; i++, eq = sc->eq[i]) #define for_all_cq_queues(sc, cq, i) \ for (i = 0, cq = sc->cq[0]; i < sc->ncqs; i++, cq = sc->cq[i]) /* Flash specific */ #define IOCTL_COOKIE "SERVERENGINES CORP" #define MAX_FLASH_COMP 32 #define IMG_ISCSI 160 #define IMG_REDBOOT 224 #define IMG_BIOS 34 #define IMG_PXEBIOS 32 #define IMG_FCOEBIOS 33 #define IMG_ISCSI_BAK 176 #define IMG_FCOE 162 #define IMG_FCOE_BAK 178 #define IMG_NCSI 16 #define IMG_PHY 192 #define FLASHROM_OPER_FLASH 1 #define FLASHROM_OPER_SAVE 2 #define FLASHROM_OPER_REPORT 4 #define FLASHROM_OPER_FLASH_PHY 9 #define FLASHROM_OPER_SAVE_PHY 10 #define TN_8022 13 enum { 
PHY_TYPE_CX4_10GB = 0, PHY_TYPE_XFP_10GB, PHY_TYPE_SFP_1GB, PHY_TYPE_SFP_PLUS_10GB, PHY_TYPE_KR_10GB, PHY_TYPE_KX4_10GB, PHY_TYPE_BASET_10GB, PHY_TYPE_BASET_1GB, PHY_TYPE_BASEX_1GB, PHY_TYPE_SGMII, PHY_TYPE_DISABLED = 255 }; /** * @brief Define and hold all necessary info for a single interrupt */ #define OCE_MAX_MSI 32 /* Message Signaled Interrupts */ #define OCE_MAX_MSIX 2048 /* PCI Express MSI Interrupts */ typedef struct oce_intr_info { void *tag; /* cookie returned by bus_setup_intr */ struct resource *intr_res; /* PCI resource container */ int irq_rr; /* resource id for the interrupt */ struct oce_softc *sc; /* pointer to the parent softc */ struct oce_eq *eq; /* pointer to the connected EQ */ struct taskqueue *tq; /* Associated task queue */ struct task task; /* task queue task */ char task_name[32]; /* task name */ int vector; /* interrupt vector number */ } OCE_INTR_INFO, *POCE_INTR_INFO; /* Ring related */ #define GET_Q_NEXT(_START, _STEP, _END) \ (((_START) + (_STEP)) < (_END) ? ((_START) + (_STEP)) \ : (((_START) + (_STEP)) - (_END))) #define DBUF_PA(obj) ((obj)->addr) #define DBUF_VA(obj) ((obj)->ptr) #define DBUF_TAG(obj) ((obj)->tag) #define DBUF_MAP(obj) ((obj)->map) #define DBUF_SYNC(obj, flags) \ (void) bus_dmamap_sync(DBUF_TAG(obj), DBUF_MAP(obj), (flags)) #define RING_NUM_PENDING(ring) ring->num_used #define RING_FULL(ring) (ring->num_used == ring->num_items) #define RING_EMPTY(ring) (ring->num_used == 0) #define RING_NUM_FREE(ring) \ (uint32_t)(ring->num_items - ring->num_used) #define RING_GET(ring, n) \ ring->cidx = GET_Q_NEXT(ring->cidx, n, ring->num_items) #define RING_PUT(ring, n) \ ring->pidx = GET_Q_NEXT(ring->pidx, n, ring->num_items) #define RING_GET_CONSUMER_ITEM_VA(ring, type) \ (void*)((type *)DBUF_VA(&ring->dma) + ring->cidx) #define RING_GET_CONSUMER_ITEM_PA(ring, type) \ (uint64_t)(((type *)DBUF_PA(ring->dbuf)) + ring->cidx) #define RING_GET_PRODUCER_ITEM_VA(ring, type) \ (void *)(((type *)DBUF_VA(&ring->dma)) + ring->pidx) #define RING_GET_PRODUCER_ITEM_PA(ring, type) \ (uint64_t)(((type *)DBUF_PA(ring->dbuf)) + ring->pidx) #define OCE_DMAPTR(o, c) ((c *)(o)->ptr) struct oce_packet_desc { struct mbuf *mbuf; bus_dmamap_t map; int nsegs; uint32_t wqe_idx; }; typedef struct oce_dma_mem { bus_dma_tag_t tag; bus_dmamap_t map; void *ptr; bus_addr_t paddr; } OCE_DMA_MEM, *POCE_DMA_MEM; typedef struct oce_ring_buffer_s { uint16_t cidx; /* Get ptr */ uint16_t pidx; /* Put Ptr */ size_t item_size; size_t num_items; uint32_t num_used; OCE_DMA_MEM dma; } oce_ring_buffer_t;
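The ring macros above encode a circular index that wraps by subtraction rather than masking, so num_items does not have to be a power of two; RING_PUT/RING_GET only advance the producer/consumer index, while num_used is accounted separately by the callers. A minimal sketch of the arithmetic these macros expand to (illustrative only, not driver code):

	/* Equivalent of GET_Q_NEXT(): wrap by subtraction, any ring length. */
	static uint32_t
	ring_next(uint32_t start, uint32_t step, uint32_t num_items)
	{
		return ((start + step) < num_items ? (start + step)
		    : (start + step) - num_items);
	}

	/* Post one item, then consume it; num_used bookkeeping is the
	 * caller's job, exactly as in the driver. */
	static void
	ring_demo(oce_ring_buffer_t *ring)
	{
		ring->pidx = ring_next(ring->pidx, 1, ring->num_items); /* RING_PUT */
		ring->num_used++;
		ring->cidx = ring_next(ring->cidx, 1, ring->num_items); /* RING_GET */
		ring->num_used--;
	}

/* Stats */ #define OCE_UNICAST_PACKET 0 #define OCE_MULTICAST_PACKET 1 #define OCE_BROADCAST_PACKET 2 #define OCE_RSVD_PACKET 3 struct oce_rx_stats { /* Total Receive Stats*/ uint64_t t_rx_pkts; uint64_t t_rx_bytes; uint32_t t_rx_frags; uint32_t t_rx_mcast_pkts; uint32_t t_rx_ucast_pkts; uint32_t t_rxcp_errs; }; struct oce_tx_stats { /*Total Transmit Stats */ uint64_t t_tx_pkts; uint64_t t_tx_bytes; uint32_t t_tx_reqs; uint32_t t_tx_stops; uint32_t t_tx_wrbs; uint32_t t_tx_compl; uint32_t t_ipv6_ext_hdr_tx_drop; }; struct oce_be_stats { uint8_t be_on_die_temperature; uint32_t be_tx_events; uint32_t eth_red_drops; uint32_t rx_drops_no_pbuf; uint32_t rx_drops_no_txpb; uint32_t rx_drops_no_erx_descr; uint32_t rx_drops_no_tpre_descr; uint32_t rx_drops_too_many_frags; uint32_t rx_drops_invalid_ring; uint32_t forwarded_packets; uint32_t rx_drops_mtu; uint32_t rx_crc_errors; uint32_t rx_alignment_symbol_errors; uint32_t rx_pause_frames; uint32_t rx_priority_pause_frames; uint32_t rx_control_frames;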
uint32_t rx_in_range_errors; uint32_t rx_out_range_errors; uint32_t rx_frame_too_long; uint32_t rx_address_match_errors; uint32_t rx_dropped_too_small; uint32_t rx_dropped_too_short; uint32_t rx_dropped_header_too_small; uint32_t rx_dropped_tcp_length; uint32_t rx_dropped_runt; uint32_t rx_ip_checksum_errs; uint32_t rx_tcp_checksum_errs; uint32_t rx_udp_checksum_errs; uint32_t rx_switched_unicast_packets; uint32_t rx_switched_multicast_packets; uint32_t rx_switched_broadcast_packets; uint32_t tx_pauseframes; uint32_t tx_priority_pauseframes; uint32_t tx_controlframes; uint32_t rxpp_fifo_overflow_drop; uint32_t rx_input_fifo_overflow_drop; uint32_t pmem_fifo_overflow_drop; uint32_t jabber_events; }; struct oce_xe201_stats { uint64_t tx_pkts; uint64_t tx_unicast_pkts; uint64_t tx_multicast_pkts; uint64_t tx_broadcast_pkts; uint64_t tx_bytes; uint64_t tx_unicast_bytes; uint64_t tx_multicast_bytes; uint64_t tx_broadcast_bytes; uint64_t tx_discards; uint64_t tx_errors; uint64_t tx_pause_frames; uint64_t tx_pause_on_frames; uint64_t tx_pause_off_frames; uint64_t tx_internal_mac_errors; uint64_t tx_control_frames; uint64_t tx_pkts_64_bytes; uint64_t tx_pkts_65_to_127_bytes; uint64_t tx_pkts_128_to_255_bytes; uint64_t tx_pkts_256_to_511_bytes; uint64_t tx_pkts_512_to_1023_bytes; uint64_t tx_pkts_1024_to_1518_bytes; uint64_t tx_pkts_1519_to_2047_bytes; uint64_t tx_pkts_2048_to_4095_bytes; uint64_t tx_pkts_4096_to_8191_bytes; uint64_t tx_pkts_8192_to_9216_bytes; uint64_t tx_lso_pkts; uint64_t rx_pkts; uint64_t rx_unicast_pkts; uint64_t rx_multicast_pkts; uint64_t rx_broadcast_pkts; uint64_t rx_bytes; uint64_t rx_unicast_bytes; uint64_t rx_multicast_bytes; uint64_t rx_broadcast_bytes; uint32_t rx_unknown_protos; uint64_t rx_discards; uint64_t rx_errors; uint64_t rx_crc_errors; uint64_t rx_alignment_errors; uint64_t rx_symbol_errors; uint64_t rx_pause_frames; uint64_t rx_pause_on_frames; uint64_t rx_pause_off_frames; uint64_t rx_frames_too_long; uint64_t rx_internal_mac_errors; uint32_t rx_undersize_pkts; uint32_t rx_oversize_pkts; uint32_t rx_fragment_pkts; uint32_t rx_jabbers; uint64_t rx_control_frames; uint64_t rx_control_frames_unknown_opcode; uint32_t rx_in_range_errors; uint32_t rx_out_of_range_errors; uint32_t rx_address_match_errors; uint32_t rx_vlan_mismatch_errors; uint32_t rx_dropped_too_small; uint32_t rx_dropped_too_short; uint32_t rx_dropped_header_too_small; uint32_t rx_dropped_invalid_tcp_length; uint32_t rx_dropped_runt; uint32_t rx_ip_checksum_errors; uint32_t rx_tcp_checksum_errors; uint32_t rx_udp_checksum_errors; uint32_t rx_non_rss_pkts; uint64_t rx_ipv4_pkts; uint64_t rx_ipv6_pkts; uint64_t rx_ipv4_bytes; uint64_t rx_ipv6_bytes; uint64_t rx_nic_pkts; uint64_t rx_tcp_pkts; uint64_t rx_iscsi_pkts; uint64_t rx_management_pkts; uint64_t rx_switched_unicast_pkts; uint64_t rx_switched_multicast_pkts; uint64_t rx_switched_broadcast_pkts; uint64_t num_forwards; uint32_t rx_fifo_overflow; uint32_t rx_input_fifo_overflow; uint64_t rx_drops_too_many_frags; uint32_t rx_drops_invalid_queue; uint64_t rx_drops_mtu; uint64_t rx_pkts_64_bytes; uint64_t rx_pkts_65_to_127_bytes; uint64_t rx_pkts_128_to_255_bytes; uint64_t rx_pkts_256_to_511_bytes; uint64_t rx_pkts_512_to_1023_bytes; uint64_t rx_pkts_1024_to_1518_bytes; uint64_t rx_pkts_1519_to_2047_bytes; uint64_t rx_pkts_2048_to_4095_bytes; uint64_t rx_pkts_4096_to_8191_bytes; uint64_t rx_pkts_8192_to_9216_bytes; }; struct oce_drv_stats { struct oce_rx_stats rx; struct oce_tx_stats tx; union { struct oce_be_stats be; struct oce_xe201_stats 
xe201; } u0; }; #define INTR_RATE_HWM 15000 #define INTR_RATE_LWM 10000 #define OCE_MAX_EQD 128u -#define OCE_MIN_EQD 50u +#define OCE_MIN_EQD 0u struct oce_set_eqd { uint32_t eq_id; uint32_t phase; uint32_t delay_multiplier; }; struct oce_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ boolean_t enable; uint32_t min_eqd; /* in usecs */ uint32_t max_eqd; /* in usecs */ uint32_t cur_eqd; /* in usecs */ uint32_t et_eqd; /* configured value when aic is off */ uint64_t ticks; - uint64_t intr_prev; + uint64_t prev_rxpkts; + uint64_t prev_txreqs; }; #define MAX_LOCK_DESC_LEN 32 struct oce_lock { struct mtx mutex; char name[MAX_LOCK_DESC_LEN+1]; }; #define OCE_LOCK struct oce_lock #define LOCK_CREATE(lock, desc) { \ strncpy((lock)->name, (desc), MAX_LOCK_DESC_LEN); \ (lock)->name[MAX_LOCK_DESC_LEN] = '\0'; \ mtx_init(&(lock)->mutex, (lock)->name, NULL, MTX_DEF); \ } #define LOCK_DESTROY(lock) \ if (mtx_initialized(&(lock)->mutex))\ mtx_destroy(&(lock)->mutex) #define TRY_LOCK(lock) mtx_trylock(&(lock)->mutex) #define LOCK(lock) mtx_lock(&(lock)->mutex) #define LOCKED(lock) mtx_owned(&(lock)->mutex) #define UNLOCK(lock) mtx_unlock(&(lock)->mutex) #define DEFAULT_MQ_MBOX_TIMEOUT (5 * 1000 * 1000) #define MBX_READY_TIMEOUT (1 * 1000 * 1000) #define DEFAULT_DRAIN_TIME 200 #define MBX_TIMEOUT_SEC 5 #define STAT_TIMEOUT 2000000 /* size of the packet descriptor array in a transmit queue */ #define OCE_TX_RING_SIZE 2048 #define OCE_RX_RING_SIZE 1024 #define OCE_WQ_PACKET_ARRAY_SIZE (OCE_TX_RING_SIZE/2) #define OCE_RQ_PACKET_ARRAY_SIZE (OCE_RX_RING_SIZE) struct oce_dev; enum eq_len { EQ_LEN_256 = 256, EQ_LEN_512 = 512, EQ_LEN_1024 = 1024, EQ_LEN_2048 = 2048, EQ_LEN_4096 = 4096 }; enum eqe_size { EQE_SIZE_4 = 4, EQE_SIZE_16 = 16 }; enum qtype { QTYPE_EQ, QTYPE_MQ, QTYPE_WQ, QTYPE_RQ, QTYPE_CQ, QTYPE_RSS }; typedef enum qstate_e { QDELETED = 0x0, QCREATED = 0x1 } qstate_t; struct eq_config { enum eq_len q_len; enum eqe_size item_size; uint32_t q_vector_num; uint8_t min_eqd; uint8_t max_eqd; uint8_t cur_eqd; uint8_t pad; }; struct oce_eq { uint32_t eq_id; void *parent; void *cb_context; oce_ring_buffer_t *ring; uint32_t ref_count; qstate_t qstate; struct oce_cq *cq[OCE_MAX_CQ_EQ]; int cq_valid; struct eq_config eq_cfg; int vector; uint64_t intr; }; enum cq_len { CQ_LEN_256 = 256, CQ_LEN_512 = 512, - CQ_LEN_1024 = 1024 + CQ_LEN_1024 = 1024, + CQ_LEN_2048 = 2048 }; struct cq_config { enum cq_len q_len; uint32_t item_size; boolean_t is_eventable; boolean_t sol_eventable; boolean_t nodelay; uint16_t dma_coalescing; }; typedef uint16_t(*cq_handler_t) (void *arg1); struct oce_cq { uint32_t cq_id; void *parent; struct oce_eq *eq; cq_handler_t cq_handler; void *cb_arg; oce_ring_buffer_t *ring; qstate_t qstate; struct cq_config cq_cfg; uint32_t ref_count; }; struct mq_config { uint32_t eqd; uint8_t q_len; uint8_t pad[3]; }; struct oce_mq { void *parent; oce_ring_buffer_t *ring; uint32_t mq_id; struct oce_cq *cq; struct oce_cq *async_cq; uint32_t mq_free; qstate_t qstate; struct mq_config cfg; }; struct oce_mbx_ctx { struct oce_mbx *mbx; void (*cb) (void *ctx); void *cb_ctx; }; struct wq_config { uint8_t wq_type; uint16_t buf_size; uint8_t pad[1]; uint32_t q_len; uint16_t pd_id; uint16_t pci_fn_num; uint32_t eqd; /* interrupt delay */ uint32_t nbufs; uint32_t nhdl; }; struct oce_tx_queue_stats { uint64_t tx_pkts; uint64_t tx_bytes; uint32_t tx_reqs; uint32_t tx_stops; /* number of times TX Q was stopped */ uint32_t tx_wrbs; uint32_t tx_compl; uint32_t tx_rate; uint32_t ipv6_ext_hdr_tx_drop; }; struct oce_wq { 
OCE_LOCK tx_lock; + OCE_LOCK tx_compl_lock; void *parent; oce_ring_buffer_t *ring; struct oce_cq *cq; bus_dma_tag_t tag; struct oce_packet_desc pckts[OCE_WQ_PACKET_ARRAY_SIZE]; uint32_t pkt_desc_tail; uint32_t pkt_desc_head; uint32_t wqm_used; boolean_t resched; uint32_t wq_free; uint32_t tx_deferd; uint32_t pkt_drops; qstate_t qstate; uint16_t wq_id; struct wq_config cfg; int queue_index; struct oce_tx_queue_stats tx_stats; struct buf_ring *br; struct task txtask; uint32_t db_offset; }; struct rq_config { uint32_t q_len; uint32_t frag_size; uint32_t mtu; uint32_t if_id; uint32_t is_rss_queue; uint32_t eqd; uint32_t nbufs; }; struct oce_rx_queue_stats { uint32_t rx_post_fail; uint32_t rx_ucast_pkts; uint32_t rx_compl; uint64_t rx_bytes; uint64_t rx_bytes_prev; uint64_t rx_pkts; uint32_t rx_rate; uint32_t rx_mcast_pkts; uint32_t rxcp_err; uint32_t rx_frags; uint32_t prev_rx_frags; uint32_t rx_fps; + uint32_t rx_drops_no_frags; /* HW has no fetched frags */ }; struct oce_rq { struct rq_config cfg; uint32_t rq_id; int queue_index; uint32_t rss_cpuid; void *parent; oce_ring_buffer_t *ring; struct oce_cq *cq; void *pad1; bus_dma_tag_t tag; struct oce_packet_desc pckts[OCE_RQ_PACKET_ARRAY_SIZE]; - uint32_t packets_in; - uint32_t packets_out; uint32_t pending; #ifdef notdef struct mbuf *head; struct mbuf *tail; int fragsleft; #endif qstate_t qstate; OCE_LOCK rx_lock; struct oce_rx_queue_stats rx_stats; struct lro_ctrl lro; int lro_pkts_queued; + int islro; + struct nic_hwlro_cqe_part1 *cqe_firstpart; }; struct link_status { uint8_t phys_port_speed; uint8_t logical_link_status; uint16_t qos_link_speed; }; #define OCE_FLAGS_PCIX 0x00000001 #define OCE_FLAGS_PCIE 0x00000002 #define OCE_FLAGS_MSI_CAPABLE 0x00000004 #define OCE_FLAGS_MSIX_CAPABLE 0x00000008 #define OCE_FLAGS_USING_MSI 0x00000010 #define OCE_FLAGS_USING_MSIX 0x00000020 #define OCE_FLAGS_FUNCRESET_RQD 0x00000040 #define OCE_FLAGS_VIRTUAL_PORT 0x00000080 #define OCE_FLAGS_MBOX_ENDIAN_RQD 0x00000100 #define OCE_FLAGS_BE3 0x00000200 #define OCE_FLAGS_XE201 0x00000400 #define OCE_FLAGS_BE2 0x00000800 #define OCE_FLAGS_SH 0x00001000 +#define OCE_FLAGS_OS2BMC 0x00002000 #define OCE_DEV_BE2_CFG_BAR 1 #define OCE_DEV_CFG_BAR 0 #define OCE_PCI_CSR_BAR 2 #define OCE_PCI_DB_BAR 4 typedef struct oce_softc { device_t dev; OCE_LOCK dev_lock; uint32_t flags; uint32_t pcie_link_speed; uint32_t pcie_link_width; uint8_t fn; /* PCI function number */ struct resource *devcfg_res; bus_space_tag_t devcfg_btag; bus_space_handle_t devcfg_bhandle; void *devcfg_vhandle; struct resource *csr_res; bus_space_tag_t csr_btag; bus_space_handle_t csr_bhandle; void *csr_vhandle; struct resource *db_res; bus_space_tag_t db_btag; bus_space_handle_t db_bhandle; void *db_vhandle; OCE_INTR_INFO intrs[OCE_MAX_EQ]; int intr_count; + int roce_intr_count; struct ifnet *ifp; struct ifmedia media; uint8_t link_status; uint8_t link_speed; uint8_t duplex; uint32_t qos_link_speed; uint32_t speed; + uint32_t enable_hwlro; char fw_version[32]; struct mac_address_format macaddr; OCE_DMA_MEM bsmbx; OCE_LOCK bmbx_lock; uint32_t config_number; uint32_t asic_revision; uint32_t port_id; uint32_t function_mode; uint32_t function_caps; uint32_t max_tx_rings; uint32_t max_rx_rings; struct oce_wq *wq[OCE_MAX_WQ]; /* TX work queues */ struct oce_rq *rq[OCE_MAX_RQ]; /* RX work queues */ struct oce_cq *cq[OCE_MAX_CQ]; /* Completion queues */ struct oce_eq *eq[OCE_MAX_EQ]; /* Event queues */ struct oce_mq *mq; /* Mailbox queue */ uint32_t neqs; uint32_t ncqs; uint32_t nrqs; uint32_t nwqs; uint32_t 
nrssqs; uint32_t tx_ring_size; uint32_t rx_ring_size; uint32_t rq_frag_size; uint32_t if_id; /* interface ID */ uint32_t nifs; /* number of adapter interfaces, 0 or 1 */ uint32_t pmac_id; /* PMAC id */ uint32_t if_cap_flags; uint32_t flow_control; uint8_t promisc; struct oce_aic_obj aic_obj[OCE_MAX_EQ]; /*Vlan Filtering related */ eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; uint16_t vlans_added; uint8_t vlan_tag[MAX_VLANS]; /*stats */ OCE_DMA_MEM stats_mem; struct oce_drv_stats oce_stats_info; struct callout timer; int8_t be3_native; uint8_t hw_error; uint16_t qnq_debug_event; uint16_t qnqid; uint32_t pvid; uint32_t max_vlans; + uint32_t bmc_filt_mask; + void *rdma_context; + uint32_t rdma_flags; + struct oce_softc *next; + } OCE_SOFTC, *POCE_SOFTC; +#define OCE_RDMA_FLAG_SUPPORTED 0x00000001 /************************************************** * BUS memory read/write macros * BE3: accesses three BAR spaces (CFG, CSR, DB) * Lancer: accesses one BAR space (CFG) **************************************************/ #define OCE_READ_CSR_MPU(sc, space, o) \ ((IS_BE(sc)) ? (bus_space_read_4((sc)->space##_btag, \ (sc)->space##_bhandle,o)) \ : (bus_space_read_4((sc)->devcfg_btag, \ (sc)->devcfg_bhandle,o))) #define OCE_READ_REG32(sc, space, o) \ ((IS_BE(sc) || IS_SH(sc)) ? (bus_space_read_4((sc)->space##_btag, \ (sc)->space##_bhandle,o)) \ : (bus_space_read_4((sc)->devcfg_btag, \ (sc)->devcfg_bhandle,o))) #define OCE_READ_REG16(sc, space, o) \ ((IS_BE(sc) || IS_SH(sc)) ? (bus_space_read_2((sc)->space##_btag, \ (sc)->space##_bhandle,o)) \ : (bus_space_read_2((sc)->devcfg_btag, \ (sc)->devcfg_bhandle,o))) #define OCE_READ_REG8(sc, space, o) \ ((IS_BE(sc) || IS_SH(sc)) ? (bus_space_read_1((sc)->space##_btag, \ (sc)->space##_bhandle,o)) \ : (bus_space_read_1((sc)->devcfg_btag, \ (sc)->devcfg_bhandle,o))) #define OCE_WRITE_CSR_MPU(sc, space, o, v) \ ((IS_BE(sc)) ? (bus_space_write_4((sc)->space##_btag, \ (sc)->space##_bhandle,o,v)) \ : (bus_space_write_4((sc)->devcfg_btag, \ (sc)->devcfg_bhandle,o,v))) #define OCE_WRITE_REG32(sc, space, o, v) \ ((IS_BE(sc) || IS_SH(sc)) ? (bus_space_write_4((sc)->space##_btag, \ (sc)->space##_bhandle,o,v)) \ : (bus_space_write_4((sc)->devcfg_btag, \ (sc)->devcfg_bhandle,o,v))) #define OCE_WRITE_REG16(sc, space, o, v) \ ((IS_BE(sc) || IS_SH(sc)) ? (bus_space_write_2((sc)->space##_btag, \ (sc)->space##_bhandle,o,v)) \ : (bus_space_write_2((sc)->devcfg_btag, \ (sc)->devcfg_bhandle,o,v))) #define OCE_WRITE_REG8(sc, space, o, v) \ ((IS_BE(sc) || IS_SH(sc)) ? 
(bus_space_write_1((sc)->space##_btag, \ (sc)->space##_bhandle,o,v)) \ : (bus_space_write_1((sc)->devcfg_btag, \ (sc)->devcfg_bhandle,o,v))) - +void oce_rx_flush_lro(struct oce_rq *rq); /*********************************************************** * DMA memory functions ***********************************************************/ #define oce_dma_sync(d, f) bus_dmamap_sync((d)->tag, (d)->map, f) int oce_dma_alloc(POCE_SOFTC sc, bus_size_t size, POCE_DMA_MEM dma, int flags); void oce_dma_free(POCE_SOFTC sc, POCE_DMA_MEM dma); void oce_dma_map_addr(void *arg, bus_dma_segment_t * segs, int nseg, int error); void oce_destroy_ring_buffer(POCE_SOFTC sc, oce_ring_buffer_t *ring); oce_ring_buffer_t *oce_create_ring_buffer(POCE_SOFTC sc, uint32_t q_len, uint32_t num_entries); /************************************************************ * oce_hw_xxx functions ************************************************************/ int oce_clear_rx_buf(struct oce_rq *rq); int oce_hw_pci_alloc(POCE_SOFTC sc); int oce_hw_init(POCE_SOFTC sc); int oce_hw_start(POCE_SOFTC sc); int oce_create_nw_interface(POCE_SOFTC sc); int oce_pci_soft_reset(POCE_SOFTC sc); int oce_hw_update_multicast(POCE_SOFTC sc); void oce_delete_nw_interface(POCE_SOFTC sc); void oce_hw_shutdown(POCE_SOFTC sc); void oce_hw_intr_enable(POCE_SOFTC sc); void oce_hw_intr_disable(POCE_SOFTC sc); void oce_hw_pci_free(POCE_SOFTC sc); /*********************************************************** * oce_queue_xxx functions ***********************************************************/ int oce_queue_init_all(POCE_SOFTC sc); int oce_start_rq(struct oce_rq *rq); int oce_start_wq(struct oce_wq *wq); int oce_start_mq(struct oce_mq *mq); int oce_start_rx(POCE_SOFTC sc); void oce_arm_eq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm, uint32_t clearint); void oce_queue_release_all(POCE_SOFTC sc); void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm); void oce_drain_eq(struct oce_eq *eq); void oce_drain_mq_cq(void *arg); void oce_drain_rq_cq(struct oce_rq *rq); void oce_drain_wq_cq(struct oce_wq *wq); uint32_t oce_page_list(oce_ring_buffer_t *ring, struct phys_addr *pa_list); /*********************************************************** * cleanup functions ***********************************************************/ void oce_stop_rx(POCE_SOFTC sc); +void oce_discard_rx_comp(struct oce_rq *rq, int num_frags); +void oce_rx_cq_clean(struct oce_rq *rq); +void oce_rx_cq_clean_hwlro(struct oce_rq *rq); void oce_intr_free(POCE_SOFTC sc); void oce_free_posted_rxbuf(struct oce_rq *rq); #if defined(INET6) || defined(INET) void oce_free_lro(POCE_SOFTC sc); #endif /************************************************************ * Mailbox functions ************************************************************/ int oce_fw_clean(POCE_SOFTC sc); int oce_reset_fun(POCE_SOFTC sc); int oce_mbox_init(POCE_SOFTC sc); int oce_mbox_dispatch(POCE_SOFTC sc, uint32_t tmo_sec); int oce_get_fw_version(POCE_SOFTC sc); int oce_first_mcc_cmd(POCE_SOFTC sc); int oce_read_mac_addr(POCE_SOFTC sc, uint32_t if_id, uint8_t perm, uint8_t type, struct mac_address_format *mac); int oce_get_fw_config(POCE_SOFTC sc); int oce_if_create(POCE_SOFTC sc, uint32_t cap_flags, uint32_t en_flags, uint16_t vlan_tag, uint8_t *mac_addr, uint32_t *if_id); int oce_if_del(POCE_SOFTC sc, uint32_t if_id); int oce_config_vlan(POCE_SOFTC sc, uint32_t if_id, struct normal_vlan *vtag_arr, uint8_t vtag_cnt, uint32_t untagged, uint32_t enable_promisc); int oce_set_flow_control(POCE_SOFTC sc, uint32_t 
flow_control); int oce_config_nic_rss(POCE_SOFTC sc, uint32_t if_id, uint16_t enable_rss); int oce_rxf_set_promiscuous(POCE_SOFTC sc, uint8_t enable); int oce_set_common_iface_rx_filter(POCE_SOFTC sc, POCE_DMA_MEM sgl); int oce_get_link_status(POCE_SOFTC sc, struct link_status *link); int oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem); -int oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem); +int oce_mbox_get_nic_stats_v1(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem); +int oce_mbox_get_nic_stats_v2(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem); int oce_mbox_get_pport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem, uint32_t reset_stats); int oce_mbox_get_vport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem, uint32_t req_size, uint32_t reset_stats); int oce_update_multicast(POCE_SOFTC sc, POCE_DMA_MEM pdma_mem); int oce_pass_through_mbox(POCE_SOFTC sc, POCE_DMA_MEM dma_mem, uint32_t req_size); int oce_mbox_macaddr_del(POCE_SOFTC sc, uint32_t if_id, uint32_t pmac_id); int oce_mbox_macaddr_add(POCE_SOFTC sc, uint8_t *mac_addr, uint32_t if_id, uint32_t *pmac_id); int oce_mbox_cmd_test_loopback(POCE_SOFTC sc, uint32_t port_num, uint32_t loopback_type, uint32_t pkt_size, uint32_t num_pkts, uint64_t pattern); int oce_mbox_cmd_set_loopback(POCE_SOFTC sc, uint8_t port_num, uint8_t loopback_type, uint8_t enable); int oce_mbox_check_native_mode(POCE_SOFTC sc); int oce_mbox_post(POCE_SOFTC sc, struct oce_mbx *mbx, struct oce_mbx_ctx *mbxctx); int oce_mbox_write_flashrom(POCE_SOFTC sc, uint32_t optype,uint32_t opcode, POCE_DMA_MEM pdma_mem, uint32_t num_bytes); int oce_mbox_lancer_write_flashrom(POCE_SOFTC sc, uint32_t data_size, uint32_t data_offset,POCE_DMA_MEM pdma_mem, uint32_t *written_data, uint32_t *additional_status); int oce_mbox_get_flashrom_crc(POCE_SOFTC sc, uint8_t *flash_crc, uint32_t offset, uint32_t optype); int oce_mbox_get_phy_info(POCE_SOFTC sc, struct oce_phy_info *phy_info); int oce_mbox_create_rq(struct oce_rq *rq); int oce_mbox_create_wq(struct oce_wq *wq); int oce_mbox_create_eq(struct oce_eq *eq); int oce_mbox_cq_create(struct oce_cq *cq, uint32_t ncoalesce, uint32_t is_eventable); int oce_mbox_read_transrecv_data(POCE_SOFTC sc, uint32_t page_num); void oce_mbox_eqd_modify_periodic(POCE_SOFTC sc, struct oce_set_eqd *set_eqd, int num); int oce_get_profile_config(POCE_SOFTC sc, uint32_t max_rss); int oce_get_func_config(POCE_SOFTC sc); void mbx_common_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom, uint8_t port, uint8_t subsys, uint8_t opcode, uint32_t timeout, uint32_t pyld_len, uint8_t version); uint16_t oce_mq_handler(void *arg); /************************************************************ * Transmit functions ************************************************************/ uint16_t oce_wq_handler(void *arg); void oce_start(struct ifnet *ifp); void oce_tx_task(void *arg, int npending); /************************************************************ * Receive functions ************************************************************/ int oce_alloc_rx_bufs(struct oce_rq *rq, int count); uint16_t oce_rq_handler(void *arg); /* Sysctl functions */ void oce_add_sysctls(POCE_SOFTC sc); void oce_refresh_queue_stats(POCE_SOFTC sc); int oce_refresh_nic_stats(POCE_SOFTC sc); int oce_stats_init(POCE_SOFTC sc); void oce_stats_free(POCE_SOFTC sc); +/* hw lro functions */ +int oce_mbox_nic_query_lro_capabilities(POCE_SOFTC sc, uint32_t *lro_rq_cnt, uint32_t *lro_flags); +int oce_mbox_nic_set_iface_lro_config(POCE_SOFTC sc, int enable); +int 
oce_mbox_create_rq_v2(struct oce_rq *rq); + /* Capabilities */ #define OCE_MODCAP_RSS 1 #define OCE_MAX_RSP_HANDLED 64 extern uint32_t oce_max_rsp_handled; /* max responses */ +extern uint32_t oce_rq_buf_size; #define OCE_MAC_LOOPBACK 0x0 #define OCE_PHY_LOOPBACK 0x1 #define OCE_ONE_PORT_EXT_LOOPBACK 0x2 #define OCE_NO_LOOPBACK 0xff #undef IFM_40G_SR4 #define IFM_40G_SR4 28 #define atomic_inc_32(x) atomic_add_32(x, 1) #define atomic_dec_32(x) atomic_subtract_32(x, 1) #define LE_64(x) htole64(x) #define LE_32(x) htole32(x) #define LE_16(x) htole16(x) #define HOST_64(x) le64toh(x) #define HOST_32(x) le32toh(x) #define HOST_16(x) le16toh(x) #define DW_SWAP(x, l) #define IS_ALIGNED(x,a) ((x % a) == 0) #define ADDR_HI(x) ((uint32_t)((uint64_t)(x) >> 32)) #define ADDR_LO(x) ((uint32_t)((uint64_t)(x) & 0xffffffff)); #define IF_LRO_ENABLED(sc) (((sc)->ifp->if_capenable & IFCAP_LRO) ? 1:0) #define IF_LSO_ENABLED(sc) (((sc)->ifp->if_capenable & IFCAP_TSO4) ? 1:0) #define IF_CSUM_ENABLED(sc) (((sc)->ifp->if_capenable & IFCAP_HWCSUM) ? 1:0) #define OCE_LOG2(x) (oce_highbit(x)) static inline uint32_t oce_highbit(uint32_t x) { int i; int c; int b; c = 0; b = 0; for (i = 0; i < 32; i++) { if ((1 << i) & x) { c++; b = i; } } if (c == 1) return b; return 0; } static inline int MPU_EP_SEMAPHORE(POCE_SOFTC sc) { if (IS_BE(sc)) return MPU_EP_SEMAPHORE_BE3; else if (IS_SH(sc)) return MPU_EP_SEMAPHORE_SH; else return MPU_EP_SEMAPHORE_XE201; } #define TRANSCEIVER_DATA_NUM_ELE 64 #define TRANSCEIVER_DATA_SIZE 256 #define TRANSCEIVER_A0_SIZE 128 #define TRANSCEIVER_A2_SIZE 128 #define PAGE_NUM_A0 0xa0 #define PAGE_NUM_A2 0xa2 #define IS_QNQ_OR_UMC(sc) ((sc->pvid && (sc->function_mode & FNM_UMC_MODE ))\ || (sc->qnqid && (sc->function_mode & FNM_FLEX10_MODE))) + +struct oce_rdma_info; +extern struct oce_rdma_if *oce_rdma_if; + + + +/* OS2BMC related */ + +#define DHCP_CLIENT_PORT 68 +#define DHCP_SERVER_PORT 67 +#define NET_BIOS_PORT1 137 +#define NET_BIOS_PORT2 138 +#define DHCPV6_RAS_PORT 547 + +#define BMC_FILT_BROADCAST_ARP ((uint32_t)(1)) +#define BMC_FILT_BROADCAST_DHCP_CLIENT ((uint32_t)(1 << 1)) +#define BMC_FILT_BROADCAST_DHCP_SERVER ((uint32_t)(1 << 2)) +#define BMC_FILT_BROADCAST_NET_BIOS ((uint32_t)(1 << 3)) +#define BMC_FILT_BROADCAST ((uint32_t)(1 << 4)) +#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER ((uint32_t)(1 << 5)) +#define BMC_FILT_MULTICAST_IPV6_RA ((uint32_t)(1 << 6)) +#define BMC_FILT_MULTICAST_IPV6_RAS ((uint32_t)(1 << 7)) +#define BMC_FILT_MULTICAST ((uint32_t)(1 << 8)) + +#define ND_ROUTER_ADVERT 134 +#define ND_NEIGHBOR_ADVERT 136 + +#define is_mc_allowed_on_bmc(sc, eh) \ + (!is_multicast_filt_enabled(sc) && \ + ETHER_IS_MULTICAST(eh->ether_dhost) && \ + !ETHER_IS_BROADCAST(eh->ether_dhost)) + +#define is_bc_allowed_on_bmc(sc, eh) \ + (!is_broadcast_filt_enabled(sc) && \ + ETHER_IS_BROADCAST(eh->ether_dhost)) + +#define is_arp_allowed_on_bmc(sc, et) \ + (is_arp(et) && is_arp_filt_enabled(sc)) + +#define is_arp(et) (et == ETHERTYPE_ARP) + +#define is_arp_filt_enabled(sc) \ + (sc->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP)) + +#define is_dhcp_client_filt_enabled(sc) \ + (sc->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT) + +#define is_dhcp_srvr_filt_enabled(sc) \ + (sc->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER) + +#define is_nbios_filt_enabled(sc) \ + (sc->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS) + +#define is_ipv6_na_filt_enabled(sc) \ + (sc->bmc_filt_mask & \ + BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER) + +#define is_ipv6_ra_filt_enabled(sc) \ + (sc->bmc_filt_mask & 
BMC_FILT_MULTICAST_IPV6_RA) + +#define is_ipv6_ras_filt_enabled(sc) \ + (sc->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS) + +#define is_broadcast_filt_enabled(sc) \ + (sc->bmc_filt_mask & BMC_FILT_BROADCAST) + +#define is_multicast_filt_enabled(sc) \ + (sc->bmc_filt_mask & BMC_FILT_MULTICAST) + +#define is_os2bmc_enabled(sc) (sc->flags & OCE_FLAGS_OS2BMC) + +#define LRO_FLAGS_HASH_MODE 0x00000001 +#define LRO_FLAGS_RSS_MODE 0x00000004 +#define LRO_FLAGS_CLSC_IPV4 0x00000010 +#define LRO_FLAGS_CLSC_IPV6 0x00000020 +#define NIC_RQ_FLAGS_RSS 0x0001 +#define NIC_RQ_FLAGS_LRO 0x0020 Index: stable/11/sys/dev/oce/oce_mbox.c =================================================================== --- stable/11/sys/dev/oce/oce_mbox.c (revision 338937) +++ stable/11/sys/dev/oce/oce_mbox.c (revision 338938) @@ -1,2222 +1,2332 @@ /*- * Copyright (C) 2013 Emulex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include "oce_if.h" extern uint32_t sfp_vpd_dump_buffer[TRANSCEIVER_DATA_NUM_ELE]; /** * @brief Reset (firmware) common function * @param sc software handle to the device * @returns 0 on success, ETIMEDOUT on failure */ int oce_reset_fun(POCE_SOFTC sc) { struct oce_mbx *mbx; struct oce_bmbx *mb; struct ioctl_common_function_reset *fwcmd; int rc = 0; if (sc->flags & OCE_FLAGS_FUNCRESET_RQD) { mb = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx); mbx = &mb->mbx; bzero(mbx, sizeof(struct oce_mbx)); fwcmd = (struct ioctl_common_function_reset *)&mbx->payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_FUNCTION_RESET, 10, /* MBX_TIMEOUT_SEC */ sizeof(struct ioctl_common_function_reset), OCE_MBX_VER_V0); mbx->u0.s.embedded = 1; mbx->payload_length = sizeof(struct ioctl_common_function_reset); rc = oce_mbox_dispatch(sc, 2); } return rc; }
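oce_reset_fun() above is the file's standard shape for small "embedded" bootstrap-mailbox commands: zero the mbx, alias a command structure over its payload, fill the common header, mark the command embedded, and dispatch or post it. A condensed sketch of that recurring pattern; FWCMD_T and OPCODE are stand-ins for a concrete command structure and opcode, everything else is as used throughout this file:

	/* Condensed embedded-command pattern (FWCMD_T/OPCODE are stand-ins). */
	static int
	oce_embedded_cmd_sketch(POCE_SOFTC sc)
	{
		struct oce_mbx mbx;
		struct FWCMD_T *fwcmd;
		int rc;

		bzero(&mbx, sizeof(struct oce_mbx));
		fwcmd = (struct FWCMD_T *)&mbx.payload;
		mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON,
		    OPCODE, MBX_TIMEOUT_SEC, sizeof(struct FWCMD_T),
		    OCE_MBX_VER_V0);
		mbx.u0.s.embedded = 1;		/* payload lives inside the mbx */
		mbx.payload_length = sizeof(struct FWCMD_T);
		rc = oce_mbox_post(sc, &mbx, NULL);
		if (rc == 0)
			rc = fwcmd->hdr.u0.rsp.status;	/* firmware status */
		return (rc);
	}

/** * @brief This function tells firmware we are * done with commands.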
* @param sc software handle to the device * @returns 0 on success, ETIMEDOUT on failure */ int oce_fw_clean(POCE_SOFTC sc) { struct oce_bmbx *mbx; uint8_t *ptr; int ret = 0; mbx = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx); ptr = (uint8_t *) &mbx->mbx; /* Endian Signature */ *ptr++ = 0xff; *ptr++ = 0xaa; *ptr++ = 0xbb; *ptr++ = 0xff; *ptr++ = 0xff; *ptr++ = 0xcc; *ptr++ = 0xdd; *ptr = 0xff; ret = oce_mbox_dispatch(sc, 2); return ret; } /** * @brief Mailbox wait * @param sc software handle to the device * @param tmo_sec timeout in seconds */ static int oce_mbox_wait(POCE_SOFTC sc, uint32_t tmo_sec) { tmo_sec *= 10000; pd_mpu_mbox_db_t mbox_db; for (;;) { if (tmo_sec != 0) { if (--tmo_sec == 0) break; } mbox_db.dw0 = OCE_READ_REG32(sc, db, PD_MPU_MBOX_DB); if (mbox_db.bits.ready) return 0; DELAY(100); } device_printf(sc->dev, "Mailbox timed out\n"); return ETIMEDOUT; } /** * @brief Mailbox dispatch * @param sc software handle to the device * @param tmo_sec timeout in seconds */ int oce_mbox_dispatch(POCE_SOFTC sc, uint32_t tmo_sec) { pd_mpu_mbox_db_t mbox_db; uint32_t pa; int rc; oce_dma_sync(&sc->bsmbx, BUS_DMASYNC_PREWRITE); pa = (uint32_t) ((uint64_t) sc->bsmbx.paddr >> 34); bzero(&mbox_db, sizeof(pd_mpu_mbox_db_t)); mbox_db.bits.ready = 0; mbox_db.bits.hi = 1; mbox_db.bits.address = pa; rc = oce_mbox_wait(sc, tmo_sec); if (rc == 0) { OCE_WRITE_REG32(sc, db, PD_MPU_MBOX_DB, mbox_db.dw0); pa = (uint32_t) ((uint64_t) sc->bsmbx.paddr >> 4) & 0x3fffffff; mbox_db.bits.ready = 0; mbox_db.bits.hi = 0; mbox_db.bits.address = pa; rc = oce_mbox_wait(sc, tmo_sec); if (rc == 0) { OCE_WRITE_REG32(sc, db, PD_MPU_MBOX_DB, mbox_db.dw0); rc = oce_mbox_wait(sc, tmo_sec); oce_dma_sync(&sc->bsmbx, BUS_DMASYNC_POSTWRITE); } } return rc; } /** * @brief Mailbox common request header initialization * @param hdr mailbox header * @param dom domain * @param port port * @param subsys subsystem * @param opcode opcode * @param timeout timeout * @param pyld_len payload length */ void mbx_common_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom, uint8_t port, uint8_t subsys, uint8_t opcode, uint32_t timeout, uint32_t pyld_len, uint8_t version) { hdr->u0.req.opcode = opcode; hdr->u0.req.subsystem = subsys; hdr->u0.req.port_number = port; hdr->u0.req.domain = dom; hdr->u0.req.timeout = timeout; hdr->u0.req.request_length = pyld_len - sizeof(struct mbx_hdr); hdr->u0.req.version = version; } /** * @brief Function to initialize the hw with host endian information * @param sc software handle to the device * @returns 0 on success, ETIMEDOUT on failure */ int oce_mbox_init(POCE_SOFTC sc) { struct oce_bmbx *mbx; uint8_t *ptr; int ret = 0; if (sc->flags & OCE_FLAGS_MBOX_ENDIAN_RQD) { mbx = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx); ptr = (uint8_t *) &mbx->mbx; /* Endian Signature */ *ptr++ = 0xff; *ptr++ = 0x12; *ptr++ = 0x34; *ptr++ = 0xff; *ptr++ = 0xff; *ptr++ = 0x56; *ptr++ = 0x78; *ptr = 0xff; ret = oce_mbox_dispatch(sc, 0); } return ret; } /** * @brief Function to get the firmware version * @param sc software handle to the device * @returns 0 on success, EIO on failure */ int oce_get_fw_version(POCE_SOFTC sc) { struct oce_mbx mbx; struct mbx_get_common_fw_version *fwcmd; int ret = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_get_common_fw_version *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_FW_VERSION, MBX_TIMEOUT_SEC, sizeof(struct mbx_get_common_fw_version), OCE_MBX_VER_V0); mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_get_common_fw_version); 
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); ret = oce_mbox_post(sc, &mbx, NULL); if (!ret) ret = fwcmd->hdr.u0.rsp.status; if (ret) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, ret, fwcmd->hdr.u0.rsp.additional_status); goto error; } bcopy(fwcmd->params.rsp.fw_ver_str, sc->fw_version, 32); error: return ret; } /** * @brief Firmware will send gratuitous notifications during * attach only after the first MCC command has been sent. We * use the MCC queue only for getting async notifications and the * mailbox for sending cmds. So to get gratuitous notifications, * at least send one dummy command on the MCC. */ int oce_first_mcc_cmd(POCE_SOFTC sc) { struct oce_mbx *mbx; struct oce_mq *mq = sc->mq; struct mbx_get_common_fw_version *fwcmd; uint32_t reg_value; mbx = RING_GET_PRODUCER_ITEM_VA(mq->ring, struct oce_mbx); bzero(mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_get_common_fw_version *)&mbx->payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_FW_VERSION, MBX_TIMEOUT_SEC, sizeof(struct mbx_get_common_fw_version), OCE_MBX_VER_V0); mbx->u0.s.embedded = 1; mbx->payload_length = sizeof(struct mbx_get_common_fw_version); bus_dmamap_sync(mq->ring->dma.tag, mq->ring->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); RING_PUT(mq->ring, 1); reg_value = (1 << 16) | mq->mq_id; OCE_WRITE_REG32(sc, db, PD_MQ_DB, reg_value); return 0; } /** * @brief Function to post an MBX to the mbox * @param sc software handle to the device * @param mbx pointer to the MBX to send * @param mbxctx pointer to the mbx context structure * @returns 0 on success, error on failure */ int oce_mbox_post(POCE_SOFTC sc, struct oce_mbx *mbx, struct oce_mbx_ctx *mbxctx) { struct oce_mbx *mb_mbx = NULL; struct oce_mq_cqe *mb_cqe = NULL; struct oce_bmbx *mb = NULL; int rc = 0; uint32_t tmo = 0; uint32_t cstatus = 0; uint32_t xstatus = 0; LOCK(&sc->bmbx_lock); mb = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx); mb_mbx = &mb->mbx; /* get the tmo */ tmo = mbx->tag[0]; mbx->tag[0] = 0; /* copy mbx into mbox */ bcopy(mbx, mb_mbx, sizeof(struct oce_mbx)); /* now dispatch */ rc = oce_mbox_dispatch(sc, tmo); if (rc == 0) { /* * the command completed successfully. Now get the * completion queue entry */ mb_cqe = &mb->cqe; DW_SWAP(u32ptr(&mb_cqe->u0.dw[0]), sizeof(struct oce_mq_cqe)); /* copy mbox mbx back */ bcopy(mb_mbx, mbx, sizeof(struct oce_mbx)); /* pick up the mailbox status */ cstatus = mb_cqe->u0.s.completion_status; xstatus = mb_cqe->u0.s.extended_status; /* * store the mbx context in the cqe tag section so that * the upper layer handling the cqe can associate the mbx * with the response */ if (cstatus == 0 && mbxctx) { /* save context */ mbxctx->mbx = mb_mbx; bcopy(&mbxctx, mb_cqe->u0.s.mq_tag, sizeof(struct oce_mbx_ctx *)); } } UNLOCK(&sc->bmbx_lock); return rc; }
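oce_first_mcc_cmd() above rings the MQ doorbell by packing the number of newly posted entries into the upper half of the register and the queue id into the lower half. A small helper spelling out that encoding (the helper name is hypothetical; the register layout is exactly what the function writes inline):

	/* PD_MQ_DB encoding used by oce_first_mcc_cmd(): posted count in
	 * bits 16 and up, MQ id in the low bits. */
	static inline void
	oce_ring_mq_db(POCE_SOFTC sc, struct oce_mq *mq, uint32_t num_posted)
	{
		uint32_t reg_value = (num_posted << 16) | mq->mq_id;

		OCE_WRITE_REG32(sc, db, PD_MQ_DB, reg_value);
	}

/** * @brief Function to read the mac address associated with an interface * @param sc software handle to the device * @param if_id interface id to read the address from * @param perm set to 1 if reading the factory mac address. * In this case if_id is ignored * @param type type of the mac address, whether network or storage * @param[out] mac [OUTPUT] pointer to a buffer containing the * mac address when the command succeeds.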
* @returns 0 on success, EIO on failure */ int oce_read_mac_addr(POCE_SOFTC sc, uint32_t if_id, uint8_t perm, uint8_t type, struct mac_address_format *mac) { struct oce_mbx mbx; struct mbx_query_common_iface_mac *fwcmd; int ret = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_query_common_iface_mac *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_IFACE_MAC, MBX_TIMEOUT_SEC, sizeof(struct mbx_query_common_iface_mac), OCE_MBX_VER_V0); fwcmd->params.req.permanent = perm; if (!perm) fwcmd->params.req.if_id = (uint16_t) if_id; else fwcmd->params.req.if_id = 0; fwcmd->params.req.type = type; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_query_common_iface_mac); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); ret = oce_mbox_post(sc, &mbx, NULL); if (!ret) ret = fwcmd->hdr.u0.rsp.status; if (ret) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, ret, fwcmd->hdr.u0.rsp.additional_status); goto error; } /* copy the mac address in the output parameter */ mac->size_of_struct = fwcmd->params.rsp.mac.size_of_struct; bcopy(&fwcmd->params.rsp.mac.mac_addr[0], &mac->mac_addr[0], mac->size_of_struct); error: return ret; } /** * @brief Function to query the fw attributes from the hw * @param sc software handle to the device * @returns 0 on success, EIO on failure */ int oce_get_fw_config(POCE_SOFTC sc) { struct oce_mbx mbx; struct mbx_common_query_fw_config *fwcmd; int ret = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_common_query_fw_config *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, MBX_TIMEOUT_SEC, sizeof(struct mbx_common_query_fw_config), OCE_MBX_VER_V0); mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_common_query_fw_config); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); ret = oce_mbox_post(sc, &mbx, NULL); if (!ret) ret = fwcmd->hdr.u0.rsp.status; if (ret) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, ret, fwcmd->hdr.u0.rsp.additional_status); goto error; } DW_SWAP(u32ptr(fwcmd), sizeof(struct mbx_common_query_fw_config)); sc->config_number = HOST_32(fwcmd->params.rsp.config_number); sc->asic_revision = HOST_32(fwcmd->params.rsp.asic_revision); sc->port_id = HOST_32(fwcmd->params.rsp.port_id); sc->function_mode = HOST_32(fwcmd->params.rsp.function_mode); + if ((sc->function_mode & (ULP_NIC_MODE | ULP_RDMA_MODE)) == + (ULP_NIC_MODE | ULP_RDMA_MODE)) { + sc->rdma_flags = OCE_RDMA_FLAG_SUPPORTED; + } sc->function_caps = HOST_32(fwcmd->params.rsp.function_caps); if (fwcmd->params.rsp.ulp[0].ulp_mode & ULP_NIC_MODE) { sc->max_tx_rings = HOST_32(fwcmd->params.rsp.ulp[0].nic_wq_tot); sc->max_rx_rings = HOST_32(fwcmd->params.rsp.ulp[0].lro_rqid_tot); } else { sc->max_tx_rings = HOST_32(fwcmd->params.rsp.ulp[1].nic_wq_tot); sc->max_rx_rings = HOST_32(fwcmd->params.rsp.ulp[1].lro_rqid_tot); } error: return ret; }
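The ULP check added to oce_get_fw_config() above uses the mask-and-compare idiom: AND with the combined mask, then compare against the full mask, so the RDMA flag is set only when *both* mode bits are present. A plain non-zero test would wrongly pass with either bit alone. Spelled out as a self-contained helper (the helper name is mine, not the driver's):

	/* Both ULP bits must be present; one alone must fail. */
	static inline int
	oce_ulp_supports_rdma(uint32_t function_mode)
	{
		const uint32_t want = ULP_NIC_MODE | ULP_RDMA_MODE;

		return ((function_mode & want) == want);
	}

/** * * @brief Function to create a device interface * @param sc software handle to the device * @param cap_flags capability flags * @param en_flags enable capability flags * @param vlan_tag optional vlan tag to associate with the if * @param mac_addr pointer to a buffer containing the mac address * @param[out] if_id [OUTPUT] pointer to an integer to hold the ID of the interface created * @returns 0 on success, EIO on failure */ int oce_if_create(POCE_SOFTC sc, uint32_t cap_flags, uint32_t en_flags, uint16_t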
vlan_tag, uint8_t *mac_addr, uint32_t *if_id) { struct oce_mbx mbx; struct mbx_create_common_iface *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_create_common_iface *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_CREATE_IFACE, MBX_TIMEOUT_SEC, sizeof(struct mbx_create_common_iface), OCE_MBX_VER_V0); DW_SWAP(u32ptr(&fwcmd->hdr), sizeof(struct mbx_hdr)); fwcmd->params.req.version = 0; fwcmd->params.req.cap_flags = LE_32(cap_flags); fwcmd->params.req.enable_flags = LE_32(en_flags); if (mac_addr != NULL) { bcopy(mac_addr, &fwcmd->params.req.mac_addr[0], 6); fwcmd->params.req.vlan_tag.u0.normal.vtag = LE_16(vlan_tag); fwcmd->params.req.mac_invalid = 0; } else { fwcmd->params.req.mac_invalid = 1; } mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_create_common_iface); DW_SWAP(u32ptr(&mbx), OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } *if_id = HOST_32(fwcmd->params.rsp.if_id); if (mac_addr != NULL) sc->pmac_id = HOST_32(fwcmd->params.rsp.pmac_id); error: return rc; } /** * @brief Function to delete an interface * @param sc software handle to the device * @param if_id ID of the interface to delete * @returns 0 on success, EIO on failure */ int oce_if_del(POCE_SOFTC sc, uint32_t if_id) { struct oce_mbx mbx; struct mbx_destroy_common_iface *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_destroy_common_iface *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_DESTROY_IFACE, MBX_TIMEOUT_SEC, sizeof(struct mbx_destroy_common_iface), OCE_MBX_VER_V0); fwcmd->params.req.if_id = if_id; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_destroy_common_iface); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } /** * @brief Function to send the mbx command to configure vlan * @param sc software handle to the device * @param if_id interface identifier index * @param vtag_arr array of vlan tags * @param vtag_cnt number of elements in array * @param untagged boolean TRUE/FALSE * @param enable_promisc flag to enable/disable VLAN promiscuous mode * @returns 0 on success, EIO on failure */ int oce_config_vlan(POCE_SOFTC sc, uint32_t if_id, struct normal_vlan *vtag_arr, uint8_t vtag_cnt, uint32_t untagged, uint32_t enable_promisc) { struct oce_mbx mbx; struct mbx_common_config_vlan *fwcmd; int rc = 0; if (sc->vlans_added > sc->max_vlans) goto vlan_promisc; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_common_config_vlan *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_CONFIG_IFACE_VLAN, MBX_TIMEOUT_SEC, sizeof(struct mbx_common_config_vlan), OCE_MBX_VER_V0); fwcmd->params.req.if_id = (uint8_t) if_id; fwcmd->params.req.promisc = (uint8_t) enable_promisc; fwcmd->params.req.untagged = (uint8_t) untagged; fwcmd->params.req.num_vlans = vtag_cnt; if (!enable_promisc) { bcopy(vtag_arr, fwcmd->params.req.tags.normal_vlans, vtag_cnt * sizeof(struct normal_vlan)); } mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_common_config_vlan); DW_SWAP(u32ptr(&mbx),
(OCE_BMBX_RHDR_SZ + mbx.payload_length)); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto done; vlan_promisc: /* Enable Vlan Promisc */ oce_rxf_set_promiscuous(sc, (1 << 1)); device_printf(sc->dev,"Enabling Vlan Promisc Mode\n"); done: return rc; } /** * @brief Function to set flow control capability in the hardware * @param sc software handle to the device * @param flow_control flow control flags to set * @returns 0 on success, EIO on failure */ int oce_set_flow_control(POCE_SOFTC sc, uint32_t flow_control) { struct oce_mbx mbx; struct mbx_common_get_set_flow_control *fwcmd = (struct mbx_common_get_set_flow_control *)&mbx.payload; int rc; bzero(&mbx, sizeof(struct oce_mbx)); mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL, MBX_TIMEOUT_SEC, sizeof(struct mbx_common_get_set_flow_control), OCE_MBX_VER_V0); if (flow_control & OCE_FC_TX) fwcmd->tx_flow_control = 1; if (flow_control & OCE_FC_RX) fwcmd->rx_flow_control = 1; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_common_get_set_flow_control); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } /** * @brief Initialize the RSS CPU indirection table * * The table is used to choose the queue to place the incoming packets. * Incoming packets are hashed. The lowest bits in the hash result * are used as the index into the CPU indirection table. * Each entry in the table contains the RSS CPU-ID returned by the NIC * create. Based on the CPU ID, the receive completion is routed to * the corresponding RSS CQs. (Non-RSS packets are always completed * on the default (0) CQ).
* * @param sc software handle to the device * @param *fwcmd pointer to the rss mbox command * @returns 0 on success, ENXIO on failure */ static int oce_rss_itbl_init(POCE_SOFTC sc, struct mbx_config_nic_rss *fwcmd) { int i = 0, j = 0, rc = 0; uint8_t *tbl = fwcmd->params.req.cputable; struct oce_rq *rq = NULL; for (j = 0; j < INDIRECTION_TABLE_ENTRIES ; j += (sc->nrqs - 1)) { for_all_rss_queues(sc, rq, i) { if ((j + i) >= INDIRECTION_TABLE_ENTRIES) break; tbl[j + i] = rq->rss_cpuid; } } if (i == 0) { device_printf(sc->dev, "error: Invalid number of RSS RQs\n"); rc = ENXIO; } /* fill log2 value indicating the size of the CPU table */ if (rc == 0) - fwcmd->params.req.cpu_tbl_sz_log2 = LE_16(OCE_LOG2(i)); + fwcmd->params.req.cpu_tbl_sz_log2 = LE_16(OCE_LOG2(INDIRECTION_TABLE_ENTRIES)); return rc; } /** * @brief Function to configure RSS on the NIC interface * @param sc software handle to the device * @param if_id interface id to configure RSS on * @param enable_rss 0=disable, RSS_ENABLE_xxx flags otherwise * @returns 0 on success, EIO on failure */ int oce_config_nic_rss(POCE_SOFTC sc, uint32_t if_id, uint16_t enable_rss) { int rc; struct oce_mbx mbx; struct mbx_config_nic_rss *fwcmd = (struct mbx_config_nic_rss *)&mbx.payload; int version; bzero(&mbx, sizeof(struct oce_mbx)); if (IS_XE201(sc) || IS_SH(sc)) { version = OCE_MBX_VER_V1; fwcmd->params.req.enable_rss = RSS_ENABLE_UDP_IPV4 | RSS_ENABLE_UDP_IPV6; } else version = OCE_MBX_VER_V0; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_NIC, NIC_CONFIG_RSS, MBX_TIMEOUT_SEC, sizeof(struct mbx_config_nic_rss), version); if (enable_rss) fwcmd->params.req.enable_rss |= (RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV6 | RSS_ENABLE_TCP_IPV6); - fwcmd->params.req.flush = OCE_FLUSH; + + if(!sc->enable_hwlro) + fwcmd->params.req.flush = OCE_FLUSH; + else + fwcmd->params.req.flush = 0; + fwcmd->params.req.if_id = LE_32(if_id); + srandom(arc4random()); /* random entropy seed */ read_random(fwcmd->params.req.hash, sizeof(fwcmd->params.req.hash)); rc = oce_rss_itbl_init(sc, fwcmd); if (rc == 0) { mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_config_nic_rss); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); } return rc; }
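The CPU table that oce_rss_itbl_init() fills is consumed by the NIC, but the lookup it encodes is simple: per the comment above, the low bits of the packet hash index the 128-entry table, and the entry selects the RSS queue. A software model of that hardware lookup (illustrative only; the helper name is mine):

	/* Software model of the NIC's RSS indirection: the low
	 * log2(INDIRECTION_TABLE_ENTRIES) bits of the hash pick the slot. */
	static inline uint8_t
	rss_pick_cpu(const uint8_t *cputable, uint32_t pkt_hash)
	{
		return (cputable[pkt_hash & (INDIRECTION_TABLE_ENTRIES - 1)]);
	}

/** * @brief RXF function to enable/disable device promiscuous mode * @param sc software handle to the device * @param enable enable/disable flag * @returns 0 on success, EIO on failure * @note * The NIC_CONFIG_PROMISCUOUS command is deprecated for Lancer. * This function uses the COMMON_SET_IFACE_RX_FILTER command instead.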
*/ int oce_rxf_set_promiscuous(POCE_SOFTC sc, uint8_t enable) { struct mbx_set_common_iface_rx_filter *fwcmd; int sz = sizeof(struct mbx_set_common_iface_rx_filter); iface_rx_filter_ctx_t *req; OCE_DMA_MEM sgl; int rc; /* allocate mbx payload's dma scatter/gather memory */ rc = oce_dma_alloc(sc, sz, &sgl, 0); if (rc) return rc; fwcmd = OCE_DMAPTR(&sgl, struct mbx_set_common_iface_rx_filter); req = &fwcmd->params.req; req->iface_flags_mask = MBX_RX_IFACE_FLAGS_PROMISCUOUS | MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS; /* Bit 0 Mac promisc, Bit 1 Vlan promisc */ if (enable & 0x01) req->iface_flags = MBX_RX_IFACE_FLAGS_PROMISCUOUS; if (enable & 0x02) - req->iface_flags = MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS; + req->iface_flags |= MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS; req->if_id = sc->if_id; rc = oce_set_common_iface_rx_filter(sc, &sgl); oce_dma_free(sc, &sgl); return rc; } /** * @brief Function to modify and select RX filter options * @param sc software handle to the device * @param sgl scatter/gather request/response * @returns 0 on success, error code on failure */ int oce_set_common_iface_rx_filter(POCE_SOFTC sc, POCE_DMA_MEM sgl) { struct oce_mbx mbx; int mbx_sz = sizeof(struct mbx_set_common_iface_rx_filter); struct mbx_set_common_iface_rx_filter *fwcmd; int rc; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = OCE_DMAPTR(sgl, struct mbx_set_common_iface_rx_filter); mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_IFACE_RX_FILTER, MBX_TIMEOUT_SEC, mbx_sz, OCE_MBX_VER_V0); oce_dma_sync(sgl, BUS_DMASYNC_PREWRITE); mbx.u0.s.embedded = 0; mbx.u0.s.sge_count = 1; mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(sgl->paddr); mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(sgl->paddr); mbx.payload.u0.u1.sgl[0].length = mbx_sz; mbx.payload_length = mbx_sz; DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } /** * @brief Function to query the link status from the hardware * @param sc software handle to the device * @param[out] link pointer to the structure returning link attributes * @returns 0 on success, EIO on failure */ int oce_get_link_status(POCE_SOFTC sc, struct link_status *link) { struct oce_mbx mbx; struct mbx_query_common_link_config *fwcmd; int rc = 0, version; bzero(&mbx, sizeof(struct oce_mbx)); IS_BE2(sc) ?
(version = OCE_MBX_VER_V0) : (version = OCE_MBX_VER_V1); fwcmd = (struct mbx_query_common_link_config *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_LINK_CONFIG, MBX_TIMEOUT_SEC, sizeof(struct mbx_query_common_link_config), version); mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_query_common_link_config); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } /* interpret response */ link->qos_link_speed = HOST_16(fwcmd->params.rsp.qos_link_speed); link->phys_port_speed = fwcmd->params.rsp.physical_port_speed; link->logical_link_status = fwcmd->params.rsp.logical_link_status; error: return rc; } - -int -oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem) -{ - struct oce_mbx mbx; - struct mbx_get_nic_stats_v0 *fwcmd; - int rc = 0; - - bzero(&mbx, sizeof(struct oce_mbx)); - - fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats_v0); - bzero(fwcmd, sizeof(struct mbx_get_nic_stats_v0)); - - mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, - MBX_SUBSYSTEM_NIC, - NIC_GET_STATS, - MBX_TIMEOUT_SEC, - sizeof(struct mbx_get_nic_stats_v0), - OCE_MBX_VER_V0); - - mbx.u0.s.embedded = 0; - mbx.u0.s.sge_count = 1; - - oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE); - - mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr); - mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr); - mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_nic_stats_v0); - - mbx.payload_length = sizeof(struct mbx_get_nic_stats_v0); - - DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); - - rc = oce_mbox_post(sc, &mbx, NULL); - - oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE); - - if (!rc) - rc = fwcmd->hdr.u0.rsp.status; - if (rc) - device_printf(sc->dev, - "%s failed - cmd status: %d addi status: %d\n", - __FUNCTION__, rc, - fwcmd->hdr.u0.rsp.additional_status); - return rc; -} - - - /** * @brief Function to get NIC statistics - * @param sc software handle to the device - * @param *stats pointer to where to store statistics - * @param reset_stats resets statistics of set - * @returns 0 on success, EIO on failure - * @note command depricated in Lancer + * @param sc software handle to the device + * @param *stats pointer to where to store statistics + * @param reset_stats resets statistics if set + * @returns 0 on success, EIO on failure + * @note command deprecated in Lancer */ -int -oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem) -{ - struct oce_mbx mbx; - struct mbx_get_nic_stats *fwcmd; - int rc = 0; - - bzero(&mbx, sizeof(struct oce_mbx)); - fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats); - bzero(fwcmd, sizeof(struct mbx_get_nic_stats)); - - mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, - MBX_SUBSYSTEM_NIC, - NIC_GET_STATS, - MBX_TIMEOUT_SEC, - sizeof(struct mbx_get_nic_stats), - OCE_MBX_VER_V1); - - - mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */ - mbx.u0.s.sge_count = 1; /* using scatter gather instead */ - - oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE); - mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr); - mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr); - mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_nic_stats); - - mbx.payload_length = sizeof(struct 
mbx_get_nic_stats); - DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); - - rc = oce_mbox_post(sc, &mbx, NULL); - oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE); - if (!rc) - rc = fwcmd->hdr.u0.rsp.status; - if (rc) - device_printf(sc->dev, - "%s failed - cmd status: %d addi status: %d\n", - __FUNCTION__, rc, - fwcmd->hdr.u0.rsp.additional_status); - return rc; +#define OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, version) \ +int \ +oce_mbox_get_nic_stats_v##version(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem) \ +{ \ + struct oce_mbx mbx; \ + struct mbx_get_nic_stats_v##version *fwcmd; \ + int rc = 0; \ + \ + bzero(&mbx, sizeof(struct oce_mbx)); \ + fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats_v##version); \ + bzero(fwcmd, sizeof(*fwcmd)); \ + \ + mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, \ + MBX_SUBSYSTEM_NIC, \ + NIC_GET_STATS, \ + MBX_TIMEOUT_SEC, \ + sizeof(*fwcmd), \ + OCE_MBX_VER_V##version); \ + \ + mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */ \ + mbx.u0.s.sge_count = 1; /* using scatter gather instead */ \ + \ + oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE); \ + mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr); \ + mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr); \ + mbx.payload.u0.u1.sgl[0].length = sizeof(*fwcmd); \ + mbx.payload_length = sizeof(*fwcmd); \ + DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); \ + \ + rc = oce_mbox_post(sc, &mbx, NULL); \ + oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE); \ + if (!rc) \ + rc = fwcmd->hdr.u0.rsp.status; \ + if (rc) \ + device_printf(sc->dev, \ + "%s failed - cmd status: %d addi status: %d\n", \ + __FUNCTION__, rc, \ + fwcmd->hdr.u0.rsp.additional_status); \ + return rc; \ } +OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 0); +OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 1); +OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 2); + /** * @brief Function to get pport (physical port) statistics * @param sc software handle to the device * @param *stats pointer to where to store statistics * @param reset_stats resets statistics if set * @returns 0 on success, EIO on failure */ int oce_mbox_get_pport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem, uint32_t reset_stats) { struct oce_mbx mbx; struct mbx_get_pport_stats *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_pport_stats); bzero(fwcmd, sizeof(struct mbx_get_pport_stats)); mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_NIC, NIC_GET_PPORT_STATS, MBX_TIMEOUT_SEC, sizeof(struct mbx_get_pport_stats), OCE_MBX_VER_V0); fwcmd->params.req.reset_stats = reset_stats; fwcmd->params.req.port_number = sc->port_id; mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */ mbx.u0.s.sge_count = 1; /* using scatter gather instead */ oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE); mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr); mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr); mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_pport_stats); mbx.payload_length = sizeof(struct mbx_get_pport_stats); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } /** * @brief Function to get vport (virtual port) statistics * @param 
sc software handle to the device * @param *stats pointer to where to store statistics * @param reset_stats resets statistics if set * @returns 0 on success, EIO on failure */ int oce_mbox_get_vport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem, uint32_t req_size, uint32_t reset_stats) { struct oce_mbx mbx; struct mbx_get_vport_stats *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_vport_stats); bzero(fwcmd, sizeof(struct mbx_get_vport_stats)); mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_NIC, NIC_GET_VPORT_STATS, MBX_TIMEOUT_SEC, sizeof(struct mbx_get_vport_stats), OCE_MBX_VER_V0); fwcmd->params.req.reset_stats = reset_stats; fwcmd->params.req.vport_number = sc->if_id; mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */ mbx.u0.s.sge_count = 1; /* using scatter gather instead */ oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE); mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr); mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr); mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_vport_stats); mbx.payload_length = sizeof(struct mbx_get_vport_stats); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } /** * @brief Function to update the multicast filter with * values in dma_mem * @param sc software handle to the device * @param dma_mem pointer to dma memory region * @returns 0 on success, EIO on failure */ int oce_update_multicast(POCE_SOFTC sc, POCE_DMA_MEM pdma_mem) { struct oce_mbx mbx; struct oce_mq_sge *sgl; struct mbx_set_common_iface_multicast *req = NULL; int rc = 0; req = OCE_DMAPTR(pdma_mem, struct mbx_set_common_iface_multicast); mbx_common_req_hdr_init(&req->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_IFACE_MULTICAST, MBX_TIMEOUT_SEC, sizeof(struct mbx_set_common_iface_multicast), OCE_MBX_VER_V0); bzero(&mbx, sizeof(struct oce_mbx)); mbx.u0.s.embedded = 0; /* Non-embedded */ mbx.payload_length = sizeof(struct mbx_set_common_iface_multicast); mbx.u0.s.sge_count = 1; sgl = &mbx.payload.u0.u1.sgl[0]; sgl->pa_hi = htole32(upper_32_bits(pdma_mem->paddr)); sgl->pa_lo = htole32((pdma_mem->paddr) & 0xFFFFFFFF); sgl->length = htole32(mbx.payload_length); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = req->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, req->hdr.u0.rsp.additional_status); return rc; } /** * @brief Function to send passthrough ioctls * @param sc software handle to the device * @param dma_mem pointer to dma memory region * @param req_size size of dma_mem * @returns 0 on success, EIO on failure */ int oce_pass_through_mbox(POCE_SOFTC sc, POCE_DMA_MEM dma_mem, uint32_t req_size) { struct oce_mbx mbx; struct oce_mq_sge *sgl; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); mbx.u0.s.embedded = 0; /* Non-embedded */ mbx.payload_length = req_size; mbx.u0.s.sge_count = 1; sgl = &mbx.payload.u0.u1.sgl[0]; sgl->pa_hi = htole32(upper_32_bits(dma_mem->paddr)); sgl->pa_lo = htole32((dma_mem->paddr) & 0xFFFFFFFF); sgl->length = htole32(req_size); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); return rc; }
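/*
 * Editor's sketch (not part of the driver source): the non-embedded
 * commands above all follow one pattern - the request/response lives in
 * externally allocated DMA memory ('dma' here is a stand-in for that
 * handle) and the mailbox only carries one scatter/gather element
 * describing it:
 *
 *	mbx.u0.s.embedded = 0;             // payload lives in DMA memory
 *	mbx.u0.s.sge_count = 1;            // one SGE describes it
 *	sgl = &mbx.payload.u0.u1.sgl[0];
 *	sgl->pa_lo = ADDR_LO(dma->paddr);  // 64-bit bus address, split
 *	sgl->pa_hi = ADDR_HI(dma->paddr);
 *	sgl->length = payload_len;
 *
 * Embedded commands (embedded = 1) instead carry the payload inline in
 * the mailbox, which limits them to small requests such as
 * SET_FLOW_CONTROL above.
 */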
int oce_mbox_macaddr_add(POCE_SOFTC sc, uint8_t *mac_addr, uint32_t if_id, uint32_t *pmac_id) { struct oce_mbx mbx; struct mbx_add_common_iface_mac *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_add_common_iface_mac *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_ADD_IFACE_MAC, MBX_TIMEOUT_SEC, sizeof(struct mbx_add_common_iface_mac), OCE_MBX_VER_V0); fwcmd->params.req.if_id = (uint16_t) if_id; bcopy(mac_addr, fwcmd->params.req.mac_address, 6); mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_add_common_iface_mac); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } *pmac_id = fwcmd->params.rsp.pmac_id; error: return rc; } int oce_mbox_macaddr_del(POCE_SOFTC sc, uint32_t if_id, uint32_t pmac_id) { struct oce_mbx mbx; struct mbx_del_common_iface_mac *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_del_common_iface_mac *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_DEL_IFACE_MAC, MBX_TIMEOUT_SEC, sizeof(struct mbx_del_common_iface_mac), OCE_MBX_VER_V0); fwcmd->params.req.if_id = (uint16_t)if_id; fwcmd->params.req.pmac_id = pmac_id; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_del_common_iface_mac); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } int oce_mbox_check_native_mode(POCE_SOFTC sc) { struct oce_mbx mbx; struct mbx_common_set_function_cap *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_common_set_function_cap *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_FUNCTIONAL_CAPS, MBX_TIMEOUT_SEC, sizeof(struct mbx_common_set_function_cap), OCE_MBX_VER_V0); fwcmd->params.req.valid_capability_flags = CAP_SW_TIMESTAMPS | CAP_BE3_NATIVE_ERX_API; fwcmd->params.req.capability_flags = CAP_BE3_NATIVE_ERX_API; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_common_set_function_cap); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } sc->be3_native = HOST_32(fwcmd->params.rsp.capability_flags) & CAP_BE3_NATIVE_ERX_API; error: return 0; } int oce_mbox_cmd_set_loopback(POCE_SOFTC sc, uint8_t port_num, uint8_t loopback_type, uint8_t enable) { struct oce_mbx mbx; struct mbx_lowlevel_set_loopback_mode *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_lowlevel_set_loopback_mode *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, MBX_TIMEOUT_SEC, sizeof(struct mbx_lowlevel_set_loopback_mode), OCE_MBX_VER_V0); fwcmd->params.req.src_port = port_num; fwcmd->params.req.dest_port = port_num; fwcmd->params.req.loopback_type = loopback_type; fwcmd->params.req.loopback_state = enable; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct 
mbx_lowlevel_set_loopback_mode); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } int oce_mbox_cmd_test_loopback(POCE_SOFTC sc, uint32_t port_num, uint32_t loopback_type, uint32_t pkt_size, uint32_t num_pkts, uint64_t pattern) { struct oce_mbx mbx; struct mbx_lowlevel_test_loopback_mode *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_lowlevel_test_loopback_mode *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_TEST_LOOPBACK, MBX_TIMEOUT_SEC, sizeof(struct mbx_lowlevel_test_loopback_mode), OCE_MBX_VER_V0); fwcmd->params.req.pattern = pattern; fwcmd->params.req.src_port = port_num; fwcmd->params.req.dest_port = port_num; fwcmd->params.req.pkt_size = pkt_size; fwcmd->params.req.num_pkts = num_pkts; fwcmd->params.req.loopback_type = loopback_type; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_lowlevel_test_loopback_mode); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } int oce_mbox_write_flashrom(POCE_SOFTC sc, uint32_t optype,uint32_t opcode, POCE_DMA_MEM pdma_mem, uint32_t num_bytes) { struct oce_mbx mbx; struct oce_mq_sge *sgl = NULL; struct mbx_common_read_write_flashrom *fwcmd = NULL; int rc = 0, payload_len = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = OCE_DMAPTR(pdma_mem, struct mbx_common_read_write_flashrom); payload_len = sizeof(struct mbx_common_read_write_flashrom) + 32*1024; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_WRITE_FLASHROM, LONG_TIMEOUT, payload_len, OCE_MBX_VER_V0); fwcmd->flash_op_type = LE_32(optype); fwcmd->flash_op_code = LE_32(opcode); fwcmd->data_buffer_size = LE_32(num_bytes); mbx.u0.s.embedded = 0; /* Non-embedded */ mbx.payload_length = payload_len; mbx.u0.s.sge_count = 1; sgl = &mbx.payload.u0.u1.sgl[0]; sgl->pa_hi = upper_32_bits(pdma_mem->paddr); sgl->pa_lo = pdma_mem->paddr & 0xFFFFFFFF; sgl->length = payload_len; /* post the command */ rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); return rc; } int oce_mbox_get_flashrom_crc(POCE_SOFTC sc, uint8_t *flash_crc, uint32_t offset, uint32_t optype) { int rc = 0, payload_len = 0; struct oce_mbx mbx; struct mbx_common_read_write_flashrom *fwcmd; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_common_read_write_flashrom *)&mbx.payload; /* Firmware requires extra 4 bytes with this ioctl.
Since there is enough room in the mbx payload it should be good enough. Reference: Bug 14853 */ payload_len = sizeof(struct mbx_common_read_write_flashrom) + 4; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_READ_FLASHROM, MBX_TIMEOUT_SEC, payload_len, OCE_MBX_VER_V0); fwcmd->flash_op_type = optype; fwcmd->flash_op_code = FLASHROM_OPER_REPORT; fwcmd->data_offset = offset; fwcmd->data_buffer_size = 0x4; mbx.u0.s.embedded = 1; mbx.payload_length = payload_len; /* post the command */ rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } bcopy(fwcmd->data_buffer, flash_crc, 4); error: return rc; } int oce_mbox_get_phy_info(POCE_SOFTC sc, struct oce_phy_info *phy_info) { struct oce_mbx mbx; struct mbx_common_phy_info *fwcmd; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_common_phy_info *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_PHY_CONFIG, MBX_TIMEOUT_SEC, sizeof(struct mbx_common_phy_info), OCE_MBX_VER_V0); mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_common_phy_info); /* now post the command */ rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } phy_info->phy_type = HOST_16(fwcmd->params.rsp.phy_info.phy_type); phy_info->interface_type = HOST_16(fwcmd->params.rsp.phy_info.interface_type); phy_info->auto_speeds_supported = HOST_16(fwcmd->params.rsp.phy_info.auto_speeds_supported); phy_info->fixed_speeds_supported = HOST_16(fwcmd->params.rsp.phy_info.fixed_speeds_supported); phy_info->misc_params = HOST_32(fwcmd->params.rsp.phy_info.misc_params); error: return rc; } int oce_mbox_lancer_write_flashrom(POCE_SOFTC sc, uint32_t data_size, uint32_t data_offset, POCE_DMA_MEM pdma_mem, uint32_t *written_data, uint32_t *additional_status) { struct oce_mbx mbx; struct mbx_lancer_common_write_object *fwcmd = NULL; int rc = 0, payload_len = 0; bzero(&mbx, sizeof(struct oce_mbx)); payload_len = sizeof(struct mbx_lancer_common_write_object); mbx.u0.s.embedded = 1; /* Embedded */ mbx.payload_length = payload_len; fwcmd = (struct mbx_lancer_common_write_object *)&mbx.payload; /* initialize the ioctl header */ mbx_common_req_hdr_init(&fwcmd->params.req.hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_WRITE_OBJECT, LONG_TIMEOUT, payload_len, OCE_MBX_VER_V0); fwcmd->params.req.write_length = data_size; if (data_size == 0) fwcmd->params.req.eof = 1; else fwcmd->params.req.eof = 0; strcpy(fwcmd->params.req.object_name, "/prg"); fwcmd->params.req.descriptor_count = 1; fwcmd->params.req.write_offset = data_offset; fwcmd->params.req.buffer_length = data_size; fwcmd->params.req.address_lower = pdma_mem->paddr & 0xFFFFFFFF; fwcmd->params.req.address_upper = upper_32_bits(pdma_mem->paddr); /* post the command */ rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->params.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->params.rsp.additional_status); goto error; } *written_data = HOST_32(fwcmd->params.rsp.actual_write_length); *additional_status = fwcmd->params.rsp.additional_status; error: return rc; } int oce_mbox_create_rq(struct oce_rq *rq) { struct oce_mbx mbx; struct 
mbx_create_nic_rq *fwcmd; POCE_SOFTC sc = rq->parent; int rc, num_pages = 0; if (rq->qstate == QCREATED) return 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_create_nic_rq *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_NIC, NIC_CREATE_RQ, MBX_TIMEOUT_SEC, sizeof(struct mbx_create_nic_rq), OCE_MBX_VER_V0); /* oce_page_list will also prepare pages */ num_pages = oce_page_list(rq->ring, &fwcmd->params.req.pages[0]); if (IS_XE201(sc)) { fwcmd->params.req.frag_size = rq->cfg.frag_size/2048; fwcmd->params.req.page_size = 1; fwcmd->hdr.u0.req.version = OCE_MBX_VER_V1; } else fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size); fwcmd->params.req.num_pages = num_pages; fwcmd->params.req.cq_id = rq->cq->cq_id; fwcmd->params.req.if_id = sc->if_id; fwcmd->params.req.max_frame_size = rq->cfg.mtu; fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_create_nic_rq); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } rq->rq_id = HOST_16(fwcmd->params.rsp.rq_id); rq->rss_cpuid = fwcmd->params.rsp.rss_cpuid; error: return rc; } int oce_mbox_create_wq(struct oce_wq *wq) { struct oce_mbx mbx; struct mbx_create_nic_wq *fwcmd; POCE_SOFTC sc = wq->parent; int rc = 0, version, num_pages; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_create_nic_wq *)&mbx.payload; if (IS_XE201(sc)) version = OCE_MBX_VER_V1; else if(IS_BE(sc)) IS_PROFILE_SUPER_NIC(sc) ? (version = OCE_MBX_VER_V2) : (version = OCE_MBX_VER_V0); else version = OCE_MBX_VER_V2; if (version > OCE_MBX_VER_V0) fwcmd->params.req.if_id = sc->if_id; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_NIC, NIC_CREATE_WQ, MBX_TIMEOUT_SEC, sizeof(struct mbx_create_nic_wq), version); num_pages = oce_page_list(wq->ring, &fwcmd->params.req.pages[0]); fwcmd->params.req.nic_wq_type = wq->cfg.wq_type; fwcmd->params.req.num_pages = num_pages; fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1; fwcmd->params.req.cq_id = wq->cq->cq_id; fwcmd->params.req.ulp_num = 1; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_create_nic_wq); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } wq->wq_id = HOST_16(fwcmd->params.rsp.wq_id); if (version == OCE_MBX_VER_V2) wq->db_offset = HOST_32(fwcmd->params.rsp.db_offset); else wq->db_offset = PD_TXULP_DB; error: return rc; } int oce_mbox_create_eq(struct oce_eq *eq) { struct oce_mbx mbx; struct mbx_create_common_eq *fwcmd; POCE_SOFTC sc = eq->parent; int rc = 0; uint32_t num_pages; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_create_common_eq *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_CREATE_EQ, MBX_TIMEOUT_SEC, sizeof(struct mbx_create_common_eq), OCE_MBX_VER_V0); num_pages = oce_page_list(eq->ring, &fwcmd->params.req.pages[0]); fwcmd->params.req.ctx.num_pages = num_pages; fwcmd->params.req.ctx.valid = 1; fwcmd->params.req.ctx.size = (eq->eq_cfg.item_size == 4) ? 
0 : 1; fwcmd->params.req.ctx.count = OCE_LOG2(eq->eq_cfg.q_len / 256); fwcmd->params.req.ctx.armed = 0; fwcmd->params.req.ctx.delay_mult = eq->eq_cfg.cur_eqd; mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_create_common_eq); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } eq->eq_id = HOST_16(fwcmd->params.rsp.eq_id); error: return rc; } int oce_mbox_cq_create(struct oce_cq *cq, uint32_t ncoalesce, uint32_t is_eventable) { struct oce_mbx mbx; struct mbx_create_common_cq *fwcmd; POCE_SOFTC sc = cq->parent; uint8_t version; oce_cq_ctx_t *ctx; uint32_t num_pages, page_size; int rc = 0; bzero(&mbx, sizeof(struct oce_mbx)); fwcmd = (struct mbx_create_common_cq *)&mbx.payload; if (IS_XE201(sc)) version = OCE_MBX_VER_V2; else version = OCE_MBX_VER_V0; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_CREATE_CQ, MBX_TIMEOUT_SEC, sizeof(struct mbx_create_common_cq), version); ctx = &fwcmd->params.req.cq_ctx; num_pages = oce_page_list(cq->ring, &fwcmd->params.req.pages[0]); page_size = 1; /* 1 for 4K */ if (version == OCE_MBX_VER_V2) { ctx->v2.num_pages = LE_16(num_pages); ctx->v2.page_size = page_size; ctx->v2.eventable = is_eventable; ctx->v2.valid = 1; ctx->v2.count = OCE_LOG2(cq->cq_cfg.q_len / 256); ctx->v2.nodelay = cq->cq_cfg.nodelay; ctx->v2.coalesce_wm = ncoalesce; ctx->v2.armed = 0; ctx->v2.eq_id = cq->eq->eq_id; if (ctx->v2.count == 3) { if ((u_int)cq->cq_cfg.q_len > (4*1024)-1) ctx->v2.cqe_count = (4*1024)-1; else ctx->v2.cqe_count = cq->cq_cfg.q_len; } } else { ctx->v0.num_pages = LE_16(num_pages); ctx->v0.eventable = is_eventable; ctx->v0.valid = 1; ctx->v0.count = OCE_LOG2(cq->cq_cfg.q_len / 256); ctx->v0.nodelay = cq->cq_cfg.nodelay; ctx->v0.coalesce_wm = ncoalesce; ctx->v0.armed = 0; ctx->v0.eq_id = cq->eq->eq_id; } mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_create_common_cq); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } cq->cq_id = HOST_16(fwcmd->params.rsp.cq_id); error: return rc; } int oce_mbox_read_transrecv_data(POCE_SOFTC sc, uint32_t page_num) { int rc = 0; struct oce_mbx mbx; struct mbx_read_common_transrecv_data *fwcmd; struct oce_mq_sge *sgl; OCE_DMA_MEM dma; /* Allocate DMA mem*/ if (oce_dma_alloc(sc, sizeof(struct mbx_read_common_transrecv_data), &dma, 0)) return ENOMEM; fwcmd = OCE_DMAPTR(&dma, struct mbx_read_common_transrecv_data); bzero(fwcmd, sizeof(struct mbx_read_common_transrecv_data)); bzero(&mbx, sizeof(struct oce_mbx)); mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_READ_TRANSRECEIVER_DATA, MBX_TIMEOUT_SEC, sizeof(struct mbx_read_common_transrecv_data), OCE_MBX_VER_V0); /* fill rest of mbx */ mbx.u0.s.embedded = 0; mbx.payload_length = sizeof(struct mbx_read_common_transrecv_data); mbx.u0.s.sge_count = 1; sgl = &mbx.payload.u0.u1.sgl[0]; sgl->pa_hi = htole32(upper_32_bits(dma.paddr)); sgl->pa_lo = htole32((dma.paddr) & 0xFFFFFFFF); sgl->length = htole32(mbx.payload_length); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); fwcmd->params.req.port = LE_32(sc->port_id); fwcmd->params.req.page_num = LE_32(page_num); /* command post */ rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = 
fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } if(fwcmd->params.rsp.page_num == PAGE_NUM_A0) { bcopy((char *)fwcmd->params.rsp.page_data, (char *)&sfp_vpd_dump_buffer[0], TRANSCEIVER_A0_SIZE); } if(fwcmd->params.rsp.page_num == PAGE_NUM_A2) { bcopy((char *)fwcmd->params.rsp.page_data, (char *)&sfp_vpd_dump_buffer[32], TRANSCEIVER_A2_SIZE); } error: oce_dma_free(sc, &dma); return rc; } void oce_mbox_eqd_modify_periodic(POCE_SOFTC sc, struct oce_set_eqd *set_eqd, int num) { struct oce_mbx mbx; struct mbx_modify_common_eq_delay *fwcmd; int rc = 0; int i = 0; bzero(&mbx, sizeof(struct oce_mbx)); /* Initialize MODIFY_EQ_DELAY ioctl header */ fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_MODIFY_EQ_DELAY, MBX_TIMEOUT_SEC, sizeof(struct mbx_modify_common_eq_delay), OCE_MBX_VER_V0); /* fill rest of mbx */ mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_modify_common_eq_delay); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); fwcmd->params.req.num_eq = num; for (i = 0; i < num; i++) { fwcmd->params.req.delay[i].eq_id = htole32(set_eqd[i].eq_id); fwcmd->params.req.delay[i].phase = 0; fwcmd->params.req.delay[i].dm = htole32(set_eqd[i].delay_multiplier); } /* command post */ rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); } int oce_get_profile_config(POCE_SOFTC sc, uint32_t max_rss) { struct oce_mbx mbx; struct mbx_common_get_profile_config *fwcmd; int rc = 0; int version = 0; struct oce_mq_sge *sgl; OCE_DMA_MEM dma; uint32_t desc_count = 0; struct oce_nic_resc_desc *nic_desc = NULL; int i; boolean_t nic_desc_valid = FALSE; if (IS_BE2(sc)) return -1; /* Allocate DMA mem*/ if (oce_dma_alloc(sc, sizeof(struct mbx_common_get_profile_config), &dma, 0)) return ENOMEM; /* Initialize GET_PROFILE_CONFIG ioctl header */ fwcmd = OCE_DMAPTR(&dma, struct mbx_common_get_profile_config); bzero(fwcmd, sizeof(struct mbx_common_get_profile_config)); if (!IS_XE201(sc)) version = OCE_MBX_VER_V1; else version = OCE_MBX_VER_V0; bzero(&mbx, sizeof(struct oce_mbx)); mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_PROFILE_CONFIG, MBX_TIMEOUT_SEC, sizeof(struct mbx_common_get_profile_config), version); /* fill rest of mbx */ mbx.u0.s.embedded = 0; mbx.payload_length = sizeof(struct mbx_common_get_profile_config); mbx.u0.s.sge_count = 1; sgl = &mbx.payload.u0.u1.sgl[0]; sgl->pa_hi = htole32(upper_32_bits(dma.paddr)); sgl->pa_lo = htole32((dma.paddr) & 0xFFFFFFFF); sgl->length = htole32(mbx.payload_length); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); fwcmd->params.req.type = ACTIVE_PROFILE; /* command post */ rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } nic_desc = (struct oce_nic_resc_desc *) fwcmd->params.rsp.resources; desc_count = HOST_32(fwcmd->params.rsp.desc_count); for (i = 0; i < desc_count; i++) { if ((nic_desc->desc_type == NIC_RESC_DESC_TYPE_V0) || (nic_desc->desc_type == NIC_RESC_DESC_TYPE_V1)) { nic_desc_valid = TRUE; break; } nic_desc = (struct oce_nic_resc_desc *) \ ((char 
*)nic_desc + nic_desc->desc_len); } if (!nic_desc_valid) { rc = -1; goto error; } else { sc->max_vlans = HOST_16(nic_desc->vlan_count); sc->nwqs = HOST_16(nic_desc->txq_count); if (sc->nwqs) sc->nwqs = MIN(sc->nwqs, OCE_MAX_WQ); else sc->nwqs = OCE_MAX_WQ; sc->nrssqs = HOST_16(nic_desc->rssq_count); if (sc->nrssqs) sc->nrssqs = MIN(sc->nrssqs, max_rss); else sc->nrssqs = max_rss; sc->nrqs = sc->nrssqs + 1; /* 1 for def RX */ } error: oce_dma_free(sc, &dma); return rc; } int oce_get_func_config(POCE_SOFTC sc) { struct oce_mbx mbx; struct mbx_common_get_func_config *fwcmd; int rc = 0; int version = 0; struct oce_mq_sge *sgl; OCE_DMA_MEM dma; uint32_t desc_count = 0; struct oce_nic_resc_desc *nic_desc = NULL; int i; boolean_t nic_desc_valid = FALSE; uint32_t max_rss = 0; if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native)) max_rss = OCE_LEGACY_MODE_RSS; else max_rss = OCE_MAX_RSS; /* Allocate DMA mem*/ if (oce_dma_alloc(sc, sizeof(struct mbx_common_get_func_config), &dma, 0)) return ENOMEM; /* Initialize GET_FUNCTION_CONFIG ioctl header */ fwcmd = OCE_DMAPTR(&dma, struct mbx_common_get_func_config); bzero(fwcmd, sizeof(struct mbx_common_get_func_config)); if (IS_SH(sc)) version = OCE_MBX_VER_V1; else version = OCE_MBX_VER_V0; bzero(&mbx, sizeof(struct oce_mbx)); mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_FUNCTION_CONFIG, MBX_TIMEOUT_SEC, sizeof(struct mbx_common_get_func_config), version); /* fill rest of mbx */ mbx.u0.s.embedded = 0; mbx.payload_length = sizeof(struct mbx_common_get_func_config); mbx.u0.s.sge_count = 1; sgl = &mbx.payload.u0.u1.sgl[0]; sgl->pa_hi = htole32(upper_32_bits(dma.paddr)); sgl->pa_lo = htole32((dma.paddr) & 0xFFFFFFFF); sgl->length = htole32(mbx.payload_length); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); /* command post */ rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev, "%s failed - cmd status: %d addi status: %d\n", __FUNCTION__, rc, fwcmd->hdr.u0.rsp.additional_status); goto error; } nic_desc = (struct oce_nic_resc_desc *) fwcmd->params.rsp.resources; desc_count = HOST_32(fwcmd->params.rsp.desc_count); for (i = 0; i < desc_count; i++) { if ((nic_desc->desc_type == NIC_RESC_DESC_TYPE_V0) || (nic_desc->desc_type == NIC_RESC_DESC_TYPE_V1)) { nic_desc_valid = TRUE; break; } nic_desc = (struct oce_nic_resc_desc *) \ ((char *)nic_desc + nic_desc->desc_len); } if (!nic_desc_valid) { rc = -1; goto error; } else { sc->max_vlans = nic_desc->vlan_count; sc->nwqs = HOST_32(nic_desc->txq_count); if (sc->nwqs) sc->nwqs = MIN(sc->nwqs, OCE_MAX_WQ); else sc->nwqs = OCE_MAX_WQ; sc->nrssqs = HOST_32(nic_desc->rssq_count); if (sc->nrssqs) sc->nrssqs = MIN(sc->nrssqs, max_rss); else sc->nrssqs = max_rss; sc->nrqs = sc->nrssqs + 1; /* 1 for def RX */ } error: oce_dma_free(sc, &dma); return rc; } + +/* hw lro functions */ + +int +oce_mbox_nic_query_lro_capabilities(POCE_SOFTC sc, uint32_t *lro_rq_cnt, uint32_t *lro_flags) +{ + struct oce_mbx mbx; + struct mbx_nic_query_lro_capabilities *fwcmd; + int rc = 0; + + bzero(&mbx, sizeof(struct oce_mbx)); + + fwcmd = (struct mbx_nic_query_lro_capabilities *)&mbx.payload; + mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, + MBX_SUBSYSTEM_NIC, + 0x20,MBX_TIMEOUT_SEC, + sizeof(struct mbx_nic_query_lro_capabilities), + OCE_MBX_VER_V0); + + mbx.u0.s.embedded = 1; + mbx.payload_length = sizeof(struct mbx_nic_query_lro_capabilities); + + rc = oce_mbox_post(sc, &mbx, NULL); + if (!rc) + rc = fwcmd->hdr.u0.rsp.status; + if (rc) { + 
device_printf(sc->dev, + "%s failed - cmd status: %d addi status: %d\n", + __FUNCTION__, rc, + fwcmd->hdr.u0.rsp.additional_status); + + return rc; + } + if(lro_flags) + *lro_flags = HOST_32(fwcmd->params.rsp.lro_flags); + + if(lro_rq_cnt) + *lro_rq_cnt = HOST_16(fwcmd->params.rsp.lro_rq_cnt); + + return rc; +} + +int +oce_mbox_nic_set_iface_lro_config(POCE_SOFTC sc, int enable) +{ + struct oce_mbx mbx; + struct mbx_nic_set_iface_lro_config *fwcmd; + int rc = 0; + + bzero(&mbx, sizeof(struct oce_mbx)); + + fwcmd = (struct mbx_nic_set_iface_lro_config *)&mbx.payload; + mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, + MBX_SUBSYSTEM_NIC, + 0x26,MBX_TIMEOUT_SEC, + sizeof(struct mbx_nic_set_iface_lro_config), + OCE_MBX_VER_V0); + + mbx.u0.s.embedded = 1; + mbx.payload_length = sizeof(struct mbx_nic_set_iface_lro_config); + + fwcmd->params.req.iface_id = sc->if_id; + fwcmd->params.req.lro_flags = 0; + + if(enable) { + fwcmd->params.req.lro_flags = LRO_FLAGS_HASH_MODE | LRO_FLAGS_RSS_MODE; + fwcmd->params.req.lro_flags |= LRO_FLAGS_CLSC_IPV4 | LRO_FLAGS_CLSC_IPV6; + + fwcmd->params.req.max_clsc_byte_cnt = 64*1024; /* min = 2974, max = 0xfa59 */ + fwcmd->params.req.max_clsc_seg_cnt = 43; /* min = 2, max = 64 */ + fwcmd->params.req.max_clsc_usec_delay = 18; /* min = 1, max = 256 */ + fwcmd->params.req.min_clsc_frame_byte_cnt = 0; /* min = 1, max = 9014 */ + } + + rc = oce_mbox_post(sc, &mbx, NULL); + if (!rc) + rc = fwcmd->hdr.u0.rsp.status; + if (rc) { + device_printf(sc->dev, + "%s failed - cmd status: %d addi status: %d\n", + __FUNCTION__, rc, + fwcmd->hdr.u0.rsp.additional_status); + + return rc; + } + return rc; +} + +int +oce_mbox_create_rq_v2(struct oce_rq *rq) +{ + struct oce_mbx mbx; + struct mbx_create_nic_rq_v2 *fwcmd; + POCE_SOFTC sc = rq->parent; + int rc = 0, num_pages = 0; + + if (rq->qstate == QCREATED) + return 0; + + bzero(&mbx, sizeof(struct oce_mbx)); + + fwcmd = (struct mbx_create_nic_rq_v2 *)&mbx.payload; + mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, + MBX_SUBSYSTEM_NIC, + 0x08, MBX_TIMEOUT_SEC, + sizeof(struct mbx_create_nic_rq_v2), + OCE_MBX_VER_V2); + + /* oce_page_list will also prepare pages */ + num_pages = oce_page_list(rq->ring, &fwcmd->params.req.pages[0]); + + fwcmd->params.req.cq_id = rq->cq->cq_id; + fwcmd->params.req.frag_size = rq->cfg.frag_size/2048; + fwcmd->params.req.num_pages = num_pages; + + fwcmd->params.req.if_id = sc->if_id; + + fwcmd->params.req.max_frame_size = rq->cfg.mtu; + fwcmd->params.req.page_size = 1; + if(rq->cfg.is_rss_queue) { + fwcmd->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO); + }else { + device_printf(sc->dev, + "non rss lro queue should not be created \n"); + goto error; + } + mbx.u0.s.embedded = 1; + mbx.payload_length = sizeof(struct mbx_create_nic_rq_v2); + + rc = oce_mbox_post(sc, &mbx, NULL); + if (!rc) + rc = fwcmd->hdr.u0.rsp.status; + if (rc) { + device_printf(sc->dev, + "%s failed - cmd status: %d addi status: %d\n", + __FUNCTION__, rc, + fwcmd->hdr.u0.rsp.additional_status); + goto error; + } + rq->rq_id = HOST_16(fwcmd->params.rsp.rq_id); + rq->rss_cpuid = fwcmd->params.rsp.rss_cpuid; + +error: + return rc; +} + Index: stable/11/sys/dev/oce/oce_queue.c =================================================================== --- stable/11/sys/dev/oce/oce_queue.c (revision 338937) +++ stable/11/sys/dev/oce/oce_queue.c (revision 338938) @@ -1,1240 +1,1403 @@ /*- * Copyright (C) 2013 Emulex * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include "oce_if.h" /***************************************************** * local queue functions *****************************************************/ static struct oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type); static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq); static void oce_wq_free(struct oce_wq *wq); static void oce_wq_del(struct oce_wq *wq); static struct oce_rq *oce_rq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t frag_size, uint32_t mtu, uint32_t rss); static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq); static void oce_rq_free(struct oce_rq *rq); static void oce_rq_del(struct oce_rq *rq); static struct oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len, uint32_t item_size, uint32_t eq_delay, uint32_t vector); static void oce_eq_del(struct oce_eq *eq); static struct oce_mq *oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len); static void oce_mq_free(struct oce_mq *mq); static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx - *mbx, size_t req_size, enum qtype qtype); + *mbx, size_t req_size, enum qtype qtype, int version); struct oce_cq *oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len, uint32_t item_size, uint32_t sol_event, uint32_t is_eventable, uint32_t nodelay, uint32_t ncoalesce); static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq); /** * @brief Create and initialize all the queues on the board * @param sc software handle to the device * @returns 0 if successful, or error **/ int oce_queue_init_all(POCE_SOFTC sc) { int rc = 0, i, vector; struct oce_wq *wq; struct oce_rq *rq; struct oce_aic_obj *aic; /* alloc TX/RX queues */ for_all_wq_queues(sc, wq, i) { sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size, NIC_WQ_TYPE_STANDARD); if (!sc->wq[i]) goto error; } for_all_rq_queues(sc, rq, i) { sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size, OCE_MAX_JUMBO_FRAME_SIZE, (i == 0) ? 
0 : is_rss_enabled(sc)); if (!sc->rq[i]) goto error; } /* Create network interface on card */ if (oce_create_nw_interface(sc)) goto error; /* create all of the event queues */ for (vector = 0; vector < sc->intr_count; vector++) { /* setup aic defaults for each event queue */ aic = &sc->aic_obj[vector]; aic->max_eqd = OCE_MAX_EQD; aic->min_eqd = OCE_MIN_EQD; aic->et_eqd = OCE_MIN_EQD; aic->enable = TRUE; + + sc->eq[vector] = oce_eq_create(sc, sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024, + EQE_SIZE_4,0, vector); - sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4, - 0, vector); if (!sc->eq[vector]) goto error; } /* create Tx, Rx and mcc queues */ for_all_wq_queues(sc, wq, i) { rc = oce_wq_create(wq, sc->eq[i]); if (rc) goto error; wq->queue_index = i; TASK_INIT(&wq->txtask, 1, oce_tx_task, wq); } for_all_rq_queues(sc, rq, i) { rc = oce_rq_create(rq, sc->if_id, sc->eq[(i == 0) ? 0:(i-1)]); if (rc) goto error; rq->queue_index = i; } sc->mq = oce_mq_create(sc, sc->eq[0], 64); if (!sc->mq) goto error; return rc; error: oce_queue_release_all(sc); return 1; } /** * @brief Releases all the queues created * @param sc software handle to the device */ void oce_queue_release_all(POCE_SOFTC sc) { int i = 0; struct oce_wq *wq; struct oce_rq *rq; struct oce_eq *eq; + /* before deleting lro queues, we have to disable hwlro */ + if(sc->enable_hwlro) + oce_mbox_nic_set_iface_lro_config(sc, 0); + for_all_rq_queues(sc, rq, i) { if (rq) { oce_rq_del(sc->rq[i]); oce_rq_free(sc->rq[i]); } } for_all_wq_queues(sc, wq, i) { if (wq) { oce_wq_del(sc->wq[i]); oce_wq_free(sc->wq[i]); } } if (sc->mq) oce_mq_free(sc->mq); for_all_evnt_queues(sc, eq, i) { if (eq) oce_eq_del(sc->eq[i]); } } /** * @brief Function to create a WQ for NIC Tx * @param sc software handle to the device * @param q_len number of entries in the queue * @param wq_type work queue type * @returns the pointer to the WQ created or NULL on failure */ static struct oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type) { struct oce_wq *wq; int rc = 0, i; /* q_len must be min 256 and max 2k */ if (q_len < 256 || q_len > 2048) { device_printf(sc->dev, "Invalid q length.
Must be " "[256, 2000]: 0x%x\n", q_len); return NULL; } /* allocate wq */ wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO); if (!wq) return NULL; /* Set the wq config */ wq->cfg.q_len = q_len; wq->cfg.wq_type = (uint8_t) wq_type; wq->cfg.eqd = OCE_DEFAULT_WQ_EQD; wq->cfg.nbufs = 2 * wq->cfg.q_len; wq->cfg.nhdl = 2 * wq->cfg.q_len; wq->parent = (void *)sc; rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, OCE_MAX_TX_SIZE, OCE_MAX_TX_ELEMENTS, PAGE_SIZE, 0, NULL, NULL, &wq->tag); if (rc) goto free_wq; for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) { rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map); if (rc) goto free_wq; } wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE); if (!wq->ring) goto free_wq; LOCK_CREATE(&wq->tx_lock, "TX_lock"); + LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK"); #if __FreeBSD_version >= 800000 /* Allocate buf ring for multiqueue*/ wq->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &wq->tx_lock.mutex); if (!wq->br) goto free_wq; #endif return wq; free_wq: device_printf(sc->dev, "Create WQ failed\n"); oce_wq_free(wq); return NULL; } /** * @brief Frees the work queue * @param wq pointer to work queue to free */ static void oce_wq_free(struct oce_wq *wq) { POCE_SOFTC sc = (POCE_SOFTC) wq->parent; int i; taskqueue_drain(taskqueue_swi, &wq->txtask); if (wq->ring != NULL) { oce_destroy_ring_buffer(sc, wq->ring); wq->ring = NULL; } for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) { if (wq->pckts[i].map != NULL) { bus_dmamap_unload(wq->tag, wq->pckts[i].map); bus_dmamap_destroy(wq->tag, wq->pckts[i].map); wq->pckts[i].map = NULL; } } if (wq->tag != NULL) bus_dma_tag_destroy(wq->tag); if (wq->br != NULL) buf_ring_free(wq->br, M_DEVBUF); LOCK_DESTROY(&wq->tx_lock); + LOCK_DESTROY(&wq->tx_compl_lock); free(wq, M_DEVBUF); } /** * @brief Create a work queue * @param wq pointer to work queue * @param eq pointer to associated event queue */ static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq) { POCE_SOFTC sc = wq->parent; struct oce_cq *cq; int rc = 0; /* create the CQ */ cq = oce_cq_create(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3); if (!cq) return ENXIO; wq->cq = cq; rc = oce_mbox_create_wq(wq); if (rc) goto error; wq->qstate = QCREATED; wq->wq_free = wq->cfg.q_len; wq->ring->cidx = 0; wq->ring->pidx = 0; eq->cq[eq->cq_valid] = cq; eq->cq_valid++; cq->cb_arg = wq; cq->cq_handler = oce_wq_handler; return 0; error: device_printf(sc->dev, "WQ create failed\n"); oce_wq_del(wq); return rc; } /** * @brief Delete a work queue * @param wq pointer to work queue */ static void oce_wq_del(struct oce_wq *wq) { struct oce_mbx mbx; struct mbx_delete_nic_wq *fwcmd; POCE_SOFTC sc = (POCE_SOFTC) wq->parent; if (wq->qstate == QCREATED) { bzero(&mbx, sizeof(struct oce_mbx)); /* now fill the command */ fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload; fwcmd->params.req.wq_id = wq->wq_id; (void)oce_destroy_q(sc, &mbx, - sizeof(struct mbx_delete_nic_wq), QTYPE_WQ); + sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0); wq->qstate = QDELETED; } if (wq->cq != NULL) { oce_cq_del(sc, wq->cq); wq->cq = NULL; } } /** * @brief function to allocate receive queue resources * @param sc software handle to the device * @param q_len length of receive queue * @param frag_size size of an receive queue fragment * @param mtu maximum transmission unit * @param rss is-rss-queue flag * @returns the pointer to the RQ created or NULL on failure */ static struct oce_rq *oce_rq_init(POCE_SOFTC sc, uint32_t q_len, 
uint32_t frag_size, uint32_t mtu, uint32_t rss) { struct oce_rq *rq; int rc = 0, i; if (OCE_LOG2(frag_size) <= 0) return NULL; if ((q_len == 0) || (q_len > 1024)) return NULL; /* allocate the rq */ rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO); if (!rq) return NULL; rq->cfg.q_len = q_len; rq->cfg.frag_size = frag_size; rq->cfg.mtu = mtu; rq->cfg.eqd = 0; rq->lro_pkts_queued = 0; rq->cfg.is_rss_queue = rss; - rq->packets_in = 0; - rq->packets_out = 0; rq->pending = 0; rq->parent = (void *)sc; rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), - 1, 0, - BUS_SPACE_MAXADDR, - BUS_SPACE_MAXADDR, - NULL, NULL, - OCE_MAX_RX_SIZE, - 1, PAGE_SIZE, 0, NULL, NULL, &rq->tag); - + 1, 0, + BUS_SPACE_MAXADDR, + BUS_SPACE_MAXADDR, + NULL, NULL, + oce_rq_buf_size, + 1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag); if (rc) goto free_rq; for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) { rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map); if (rc) goto free_rq; } /* create the ring buffer */ rq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_nic_rqe)); if (!rq->ring) goto free_rq; LOCK_CREATE(&rq->rx_lock, "RX_lock"); return rq; free_rq: device_printf(sc->dev, "Create RQ failed\n"); oce_rq_free(rq); return NULL; } /** * @brief Free a receive queue * @param rq pointer to receive queue */ static void oce_rq_free(struct oce_rq *rq) { POCE_SOFTC sc = (POCE_SOFTC) rq->parent; int i = 0 ; if (rq->ring != NULL) { oce_destroy_ring_buffer(sc, rq->ring); rq->ring = NULL; } for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) { if (rq->pckts[i].map != NULL) { bus_dmamap_unload(rq->tag, rq->pckts[i].map); bus_dmamap_destroy(rq->tag, rq->pckts[i].map); rq->pckts[i].map = NULL; } if (rq->pckts[i].mbuf) { m_free(rq->pckts[i].mbuf); rq->pckts[i].mbuf = NULL; } } if (rq->tag != NULL) bus_dma_tag_destroy(rq->tag); LOCK_DESTROY(&rq->rx_lock); free(rq, M_DEVBUF); } /** * @brief Create a receive queue * @param rq receive queue * @param if_id interface identifier index * @param eq pointer to event queue */ static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq) { POCE_SOFTC sc = rq->parent; struct oce_cq *cq; - cq = oce_cq_create(sc, - eq, - CQ_LEN_1024, - sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3); + cq = oce_cq_create(sc, eq, + sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024, + sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3); + if (!cq) return ENXIO; rq->cq = cq; rq->cfg.if_id = if_id; /* Don't create RQ here.
Create in if_activate */ rq->qstate = 0; rq->ring->cidx = 0; rq->ring->pidx = 0; eq->cq[eq->cq_valid] = cq; eq->cq_valid++; cq->cb_arg = rq; cq->cq_handler = oce_rq_handler; return 0; } /** * @brief Delete a receive queue * @param rq receive queue */ static void oce_rq_del(struct oce_rq *rq) { POCE_SOFTC sc = (POCE_SOFTC) rq->parent; struct oce_mbx mbx; struct mbx_delete_nic_rq *fwcmd; + struct mbx_delete_nic_rq_v1 *fwcmd1; if (rq->qstate == QCREATED) { bzero(&mbx, sizeof(mbx)); - - fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload; - fwcmd->params.req.rq_id = rq->rq_id; - (void)oce_destroy_q(sc, &mbx, - sizeof(struct mbx_delete_nic_rq), QTYPE_RQ); + if(!rq->islro) { + fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload; + fwcmd->params.req.rq_id = rq->rq_id; + (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0); + }else { + fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload; + fwcmd1->params.req.rq_id = rq->rq_id; + fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO); + (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1); + } rq->qstate = QDELETED; } if (rq->cq != NULL) { oce_cq_del(sc, rq->cq); rq->cq = NULL; } } /** * @brief function to create an event queue * @param sc software handle to the device * @param q_len length of event queue * @param item_size size of an event queue item * @param eq_delay event queue delay * @retval eq success, pointer to event queue * @retval NULL failure */ static struct oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len, uint32_t item_size, uint32_t eq_delay, uint32_t vector) { struct oce_eq *eq; int rc = 0; /* allocate an eq */ eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO); if (eq == NULL) return NULL; eq->parent = (void *)sc; eq->eq_id = 0xffff; eq->ring = oce_create_ring_buffer(sc, q_len, item_size); if (!eq->ring) goto free_eq; eq->eq_cfg.q_len = q_len; eq->eq_cfg.item_size = item_size; eq->eq_cfg.cur_eqd = (uint8_t) eq_delay; rc = oce_mbox_create_eq(eq); if (rc) goto free_eq; sc->intrs[sc->neqs++].eq = eq; return eq; free_eq: oce_eq_del(eq); return NULL; } /** * @brief Function to delete an event queue * @param eq pointer to an event queue */ static void oce_eq_del(struct oce_eq *eq) { struct oce_mbx mbx; struct mbx_destroy_common_eq *fwcmd; POCE_SOFTC sc = (POCE_SOFTC) eq->parent; if (eq->eq_id != 0xffff) { bzero(&mbx, sizeof(mbx)); fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload; fwcmd->params.req.id = eq->eq_id; (void)oce_destroy_q(sc, &mbx, - sizeof(struct mbx_destroy_common_eq), QTYPE_EQ); + sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0); } if (eq->ring != NULL) { oce_destroy_ring_buffer(sc, eq->ring); eq->ring = NULL; } free(eq, M_DEVBUF); } /** * @brief Function to create an MQ * @param sc software handle to the device * @param eq the EQ to associate with the MQ for event notification * @param q_len the number of entries to create in the MQ * @returns pointer to the created MQ, failure otherwise */ static struct oce_mq * oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len) { struct oce_mbx mbx; struct mbx_create_common_mq_ex *fwcmd = NULL; struct oce_mq *mq = NULL; int rc = 0; struct oce_cq *cq; oce_mq_ext_ctx_t *ctx; uint32_t num_pages; uint32_t page_size; int version; cq = oce_cq_create(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe), 1, 1, 0, 0); if (!cq) return NULL; /* allocate the mq */ mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO); if (!mq) { oce_cq_del(sc, cq); goto error; } mq->parent = sc; mq->ring = 
oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx)); if (!mq->ring) goto error; bzero(&mbx, sizeof(struct oce_mbx)); IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0); fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload; mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON, OPCODE_COMMON_CREATE_MQ_EXT, MBX_TIMEOUT_SEC, sizeof(struct mbx_create_common_mq_ex), version); num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]); page_size = mq->ring->num_items * mq->ring->item_size; ctx = &fwcmd->params.req.context; if (IS_XE201(sc)) { ctx->v1.num_pages = num_pages; ctx->v1.ring_size = OCE_LOG2(q_len) + 1; ctx->v1.cq_id = cq->cq_id; ctx->v1.valid = 1; ctx->v1.async_cq_id = cq->cq_id; ctx->v1.async_cq_valid = 1; /* Subscribe to Link State and Group 5 Events(bits 1 & 5 set) */ ctx->v1.async_evt_bitmap |= LE_32(0x00000022); ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG); ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_SLIPORT); } else { ctx->v0.num_pages = num_pages; ctx->v0.cq_id = cq->cq_id; ctx->v0.ring_size = OCE_LOG2(q_len) + 1; ctx->v0.valid = 1; /* Subscribe to Link State and Group5 Events(bits 1 & 5 set) */ ctx->v0.async_evt_bitmap = 0xffffffff; } mbx.u0.s.embedded = 1; mbx.payload_length = sizeof(struct mbx_create_common_mq_ex); DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, &mbx, NULL); if (!rc) rc = fwcmd->hdr.u0.rsp.status; if (rc) { device_printf(sc->dev,"%s failed - cmd status: %d\n", __FUNCTION__, rc); goto error; } mq->mq_id = LE_16(fwcmd->params.rsp.mq_id); mq->cq = cq; eq->cq[eq->cq_valid] = cq; eq->cq_valid++; mq->cq->eq = eq; mq->cfg.q_len = (uint8_t) q_len; mq->cfg.eqd = 0; mq->qstate = QCREATED; mq->cq->cb_arg = mq; mq->cq->cq_handler = oce_mq_handler; return mq; error: device_printf(sc->dev, "MQ create failed\n"); oce_mq_free(mq); mq = NULL; return mq; } /** * @brief Function to free a mailbox queue * @param mq pointer to a mailbox queue */ static void oce_mq_free(struct oce_mq *mq) { POCE_SOFTC sc = (POCE_SOFTC) mq->parent; struct oce_mbx mbx; struct mbx_destroy_common_mq *fwcmd; if (!mq) return; if (mq->ring != NULL) { oce_destroy_ring_buffer(sc, mq->ring); mq->ring = NULL; if (mq->qstate == QCREATED) { bzero(&mbx, sizeof (struct oce_mbx)); fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload; fwcmd->params.req.id = mq->mq_id; (void) oce_destroy_q(sc, &mbx, sizeof (struct mbx_destroy_common_mq), - QTYPE_MQ); + QTYPE_MQ, 0); } mq->qstate = QDELETED; } if (mq->cq != NULL) { oce_cq_del(sc, mq->cq); mq->cq = NULL; } free(mq, M_DEVBUF); mq = NULL; } /** * @brief Function to delete an EQ, CQ, MQ, WQ or RQ * @param sc software handle to the device * @param mbx mailbox command to send to the fw to delete the queue * (mbx contains the queue information to delete) * @param req_size the size of the mbx payload dependent on the qtype * @param qtype the type of queue i.e.
EQ, CQ, MQ, WQ or RQ * @returns 0 on success, failure otherwise */ static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size, - enum qtype qtype) + enum qtype qtype, int version) { struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload; int opcode; int subsys; int rc = 0; switch (qtype) { case QTYPE_EQ: opcode = OPCODE_COMMON_DESTROY_EQ; subsys = MBX_SUBSYSTEM_COMMON; break; case QTYPE_CQ: opcode = OPCODE_COMMON_DESTROY_CQ; subsys = MBX_SUBSYSTEM_COMMON; break; case QTYPE_MQ: opcode = OPCODE_COMMON_DESTROY_MQ; subsys = MBX_SUBSYSTEM_COMMON; break; case QTYPE_WQ: opcode = NIC_DELETE_WQ; subsys = MBX_SUBSYSTEM_NIC; break; case QTYPE_RQ: opcode = NIC_DELETE_RQ; subsys = MBX_SUBSYSTEM_NIC; break; default: return EINVAL; } mbx_common_req_hdr_init(hdr, 0, 0, subsys, opcode, MBX_TIMEOUT_SEC, req_size, - OCE_MBX_VER_V0); + version); mbx->u0.s.embedded = 1; mbx->payload_length = (uint32_t) req_size; DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ); rc = oce_mbox_post(sc, mbx, NULL); if (!rc) rc = hdr->u0.rsp.status; if (rc) device_printf(sc->dev,"%s failed - cmd status: %d\n", __FUNCTION__, rc); return rc; } /** * @brief Function to create a completion queue * @param sc software handle to the device * @param eq optional eq to be associated with the cq * @param q_len length of completion queue * @param item_size size of completion queue items * @param sol_event command context event * @param is_eventable flag indicating whether the cq generates events to its eq * @param nodelay no delay flag * @param ncoalesce no coalescence flag * @returns pointer to the cq created, NULL on failure */ struct oce_cq * oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len, uint32_t item_size, uint32_t sol_event, uint32_t is_eventable, uint32_t nodelay, uint32_t ncoalesce) { struct oce_cq *cq = NULL; int rc = 0; cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO); if (!cq) return NULL; cq->ring = oce_create_ring_buffer(sc, q_len, item_size); if (!cq->ring) goto error; cq->parent = sc; cq->eq = eq; cq->cq_cfg.q_len = q_len; cq->cq_cfg.item_size = item_size; cq->cq_cfg.nodelay = (uint8_t) nodelay; rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable); if (rc) goto error; sc->cq[sc->ncqs++] = cq; return cq; error: device_printf(sc->dev, "CQ create failed\n"); oce_cq_del(sc, cq); return NULL; } /** * @brief Deletes the completion queue * @param sc software handle to the device * @param cq pointer to a completion queue */ static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq) { struct oce_mbx mbx; struct mbx_destroy_common_cq *fwcmd; if (cq->ring != NULL) { bzero(&mbx, sizeof(struct oce_mbx)); /* now fill the command */ fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload; fwcmd->params.req.id = cq->cq_id; (void)oce_destroy_q(sc, &mbx, - sizeof(struct mbx_destroy_common_cq), QTYPE_CQ); + sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0); /* now destroy the ring */ oce_destroy_ring_buffer(sc, cq->ring); cq->ring = NULL; } free(cq, M_DEVBUF); cq = NULL; } /** * @brief Start a receive queue * @param rq pointer to a receive queue */ int oce_start_rq(struct oce_rq *rq) { + POCE_SOFTC sc = (POCE_SOFTC) rq->parent; int rc; - rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len); + if(sc->enable_hwlro) + rc = oce_alloc_rx_bufs(rq, 960); + else + rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1); if (rc == 0) oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE); + return rc; } /** * @brief Start a work queue * @param wq pointer to a work queue */ int oce_start_wq(struct oce_wq *wq) { oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE); return 0; }
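/* * The start routines here arm each queue's CQ with the rearm bit set so the * first completion after (re)start raises an event. For RQs, the non-LRO * path posts cfg.q_len - 1 buffers, likely to keep one slot free so a full * ring remains distinguishable from an empty one; the fixed count of 960 * posted when hardware LRO is enabled presumably matches the ring sizing the * LRO firmware expects. Both readings are inferences, not documented * constants. */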
/** * @brief Start a mailbox queue * @param mq pointer to a mailbox queue */ int oce_start_mq(struct oce_mq *mq) { oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE); return 0; } /** * @brief Function to arm an EQ so that it can generate events * @param sc software handle to the device * @param qid id of the EQ returned by the fw at the time of creation * @param npopped number of EQEs to arm * @param rearm rearm bit enable/disable * @param clearint bit to clear the interrupt condition because of which * EQEs are generated */ void oce_arm_eq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm, uint32_t clearint) { eq_db_t eq_db = { 0 }; eq_db.bits.rearm = rearm; eq_db.bits.event = 1; eq_db.bits.num_popped = npopped; eq_db.bits.clrint = clearint; eq_db.bits.qid = qid; OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0); } /** * @brief Function to arm a CQ with CQEs * @param sc software handle to the device * @param qid id of the CQ returned by the fw at the time of creation * @param npopped number of CQEs to arm * @param rearm rearm bit enable/disable */ void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm) { cq_db_t cq_db = { 0 }; cq_db.bits.rearm = rearm; cq_db.bits.num_popped = npopped; cq_db.bits.event = 0; cq_db.bits.qid = qid; OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0); } /** * @brief Function to drain an EQ during stop * @param eq pointer to the event queue to drain */ void oce_drain_eq(struct oce_eq *eq) { struct oce_eqe *eqe; uint16_t num_eqe = 0; POCE_SOFTC sc = eq->parent; do { eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe); if (eqe->evnt == 0) break; eqe->evnt = 0; bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map, BUS_DMASYNC_POSTWRITE); num_eqe++; RING_GET(eq->ring, 1); } while (TRUE); oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE); } /** * @brief Function to drain a work queue's completion queue during stop * @param wq pointer to the work queue */ void oce_drain_wq_cq(struct oce_wq *wq) { POCE_SOFTC sc = wq->parent; struct oce_cq *cq = wq->cq; struct oce_nic_tx_cqe *cqe; int num_cqes = 0; bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); do { cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); if (cqe->u0.dw[3] == 0) break; cqe->u0.dw[3] = 0; bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); RING_GET(cq->ring, 1); num_cqes++; } while (TRUE); oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); } /** * @brief Function to drain the MQ's completion queue and process its CQEs * @param arg pointer to the mailbox queue whose cq is to be drained */ void oce_drain_mq_cq(void *arg) { /* TODO: additional code.
*/ return; } /** * @brief Function to drain a receive queue's completion queue * @param rq pointer to the RQ to drain */ void oce_drain_rq_cq(struct oce_rq *rq) { struct oce_nic_rx_cqe *cqe; uint16_t num_cqe = 0; struct oce_cq *cq; POCE_SOFTC sc; sc = rq->parent; cq = rq->cq; cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); /* dequeue till you reach an invalid cqe */ while (RQ_CQE_VALID(cqe)) { RQ_CQE_INVALIDATE(cqe); RING_GET(cq->ring, 1); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); num_cqe++; } oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE); return; } void oce_free_posted_rxbuf(struct oce_rq *rq) { struct oce_packet_desc *pd; while (rq->pending) { - pd = &rq->pckts[rq->packets_out]; + pd = &rq->pckts[rq->ring->cidx]; bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(rq->tag, pd->map); if (pd->mbuf != NULL) { m_freem(pd->mbuf); pd->mbuf = NULL; } - if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE) - rq->packets_out = 0; - else - rq->packets_out++; - + RING_GET(rq->ring,1); rq->pending--; } } void -oce_stop_rx(POCE_SOFTC sc) +oce_rx_cq_clean_hwlro(struct oce_rq *rq) { - struct oce_mbx mbx; - struct mbx_delete_nic_rq *fwcmd; - struct oce_rq *rq; - int i = 0; + struct oce_cq *cq = rq->cq; + POCE_SOFTC sc = rq->parent; + struct nic_hwlro_singleton_cqe *cqe; + struct nic_hwlro_cqe_part2 *cqe2; + int flush_wait = 0; + int flush_compl = 0; + int num_frags = 0; - for_all_rq_queues(sc, rq, i) { - if (rq->qstate == QCREATED) { - /* Delete rxq in firmware */ + for (;;) { + bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); + cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe); + if(cqe->valid) { + if(cqe->cqe_type == 0) { /* singleton cqe */ + /* we should not get singleton cqe after cqe1 on same rq */ + if(rq->cqe_firstpart != NULL) { + device_printf(sc->dev, "Got singleton cqe after cqe1 \n"); + goto exit_rx_cq_clean_hwlro; + } + num_frags = cqe->pkt_size / rq->cfg.frag_size; + if(cqe->pkt_size % rq->cfg.frag_size) + num_frags++; + oce_discard_rx_comp(rq, num_frags); + /* Check if CQE is flush completion */ + if(!cqe->pkt_size) + flush_compl = 1; + cqe->valid = 0; + RING_GET(cq->ring, 1); + }else if(cqe->cqe_type == 0x1) { /* first part */ + /* we should not get cqe1 after cqe1 on same rq */ + if(rq->cqe_firstpart != NULL) { + device_printf(sc->dev, "Got cqe1 after cqe1 \n"); + goto exit_rx_cq_clean_hwlro; + } + rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe; + RING_GET(cq->ring, 1); + }else if(cqe->cqe_type == 0x2) { /* second part */ + cqe2 = (struct nic_hwlro_cqe_part2 *)cqe; + /* We should not get cqe2 without cqe1 */ + if(rq->cqe_firstpart == NULL) { + device_printf(sc->dev, "Got cqe2 without cqe1 \n"); + goto exit_rx_cq_clean_hwlro; + } + num_frags = cqe2->coalesced_size / rq->cfg.frag_size; + if(cqe2->coalesced_size % rq->cfg.frag_size) + num_frags++; + + /* Flush completion will always come in singleton CQE */ + oce_discard_rx_comp(rq, num_frags); - bzero(&mbx, sizeof(mbx)); - fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload; - fwcmd->params.req.rq_id = rq->rq_id; + rq->cqe_firstpart->valid = 0; + cqe2->valid = 0; + rq->cqe_firstpart = NULL; + RING_GET(cq->ring, 1); + } + oce_arm_cq(sc, cq->cq_id, 1, FALSE); + if(flush_compl) + break; + }else { + if (flush_wait++ > 100) { + device_printf(sc->dev, "did not receive hwlro flush compl\n"); + break; + } + oce_arm_cq(sc, cq->cq_id, 0, TRUE); + DELAY(1000); + } + } - (void)oce_destroy_q(sc, &mbx, -
sizeof(struct mbx_delete_nic_rq), QTYPE_RQ); + /* After cleanup, leave the CQ in unarmed state */ + oce_arm_cq(sc, cq->cq_id, 0, FALSE); - rq->qstate = QDELETED; +exit_rx_cq_clean_hwlro: + return; +} - DELAY(1); - /* Free posted RX buffers that are not used */ - oce_free_posted_rxbuf(rq); +void +oce_rx_cq_clean(struct oce_rq *rq) +{ + struct oce_nic_rx_cqe *cqe; + struct oce_cq *cq; + POCE_SOFTC sc; + int flush_wait = 0; + int flush_compl = 0; + sc = rq->parent; + cq = rq->cq; + + for (;;) { + bus_dmamap_sync(cq->ring->dma.tag, + cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); + cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); + if(RQ_CQE_VALID(cqe)) { + DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe)); + oce_discard_rx_comp(rq, cqe->u0.s.num_fragments); + /* Check if CQE is flush completion */ + if((cqe->u0.s.num_fragments==0)&&(cqe->u0.s.pkt_size == 0)&&(cqe->u0.s.error == 0)) + flush_compl = 1; + + RQ_CQE_INVALIDATE(cqe); + RING_GET(cq->ring, 1); +#if defined(INET6) || defined(INET) + if (IF_LRO_ENABLED(sc)) + oce_rx_flush_lro(rq); +#endif + oce_arm_cq(sc, cq->cq_id, 1, FALSE); + if(flush_compl) + break; + }else { + if (flush_wait++ > 100) { + device_printf(sc->dev, "did not receive flush compl\n"); + break; + } + oce_arm_cq(sc, cq->cq_id, 0, TRUE); + DELAY(1000); + } + } + + /* After cleanup, leave the CQ in unarmed state */ + oce_arm_cq(sc, cq->cq_id, 0, FALSE); +} + +void +oce_stop_rx(POCE_SOFTC sc) +{ + struct oce_mbx mbx; + struct mbx_delete_nic_rq *fwcmd; + struct mbx_delete_nic_rq_v1 *fwcmd1; + struct oce_rq *rq; + int i = 0; + + /* before deleting disable hwlro */ + if(sc->enable_hwlro) + oce_mbox_nic_set_iface_lro_config(sc, 0); + + for_all_rq_queues(sc, rq, i) { + if (rq->qstate == QCREATED) { + /* Delete rxq in firmware */ + LOCK(&rq->rx_lock); + + bzero(&mbx, sizeof(mbx)); + if(!rq->islro) { + fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload; + fwcmd->params.req.rq_id = rq->rq_id; + (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0); + }else { + fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload; + fwcmd1->params.req.rq_id = rq->rq_id; + fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO); + + (void)oce_destroy_q(sc,&mbx,sizeof(struct mbx_delete_nic_rq_v1),QTYPE_RQ,1); + } + rq->qstate = QDELETED; + + DELAY(1000); - } - } + if(!rq->islro) + oce_rx_cq_clean(rq); + else + oce_rx_cq_clean_hwlro(rq); + + /* Free posted RX buffers that are not used */ + oce_free_posted_rxbuf(rq); + UNLOCK(&rq->rx_lock); + } + } } int oce_start_rx(POCE_SOFTC sc) { struct oce_rq *rq; int rc = 0, i; for_all_rq_queues(sc, rq, i) { if (rq->qstate == QCREATED) continue; - rc = oce_mbox_create_rq(rq); + if((i == 0) || (!sc->enable_hwlro)) { + rc = oce_mbox_create_rq(rq); + if (rc) + goto error; + rq->islro = 0; + }else { + rc = oce_mbox_create_rq_v2(rq); + if (rc) + goto error; + rq->islro = 1; + } + /* reset queue pointers */ + rq->qstate = QCREATED; + rq->pending = 0; + rq->ring->cidx = 0; + rq->ring->pidx = 0; + } + + if(sc->enable_hwlro) { + rc = oce_mbox_nic_set_iface_lro_config(sc, 1); if (rc) goto error; - /* reset queue pointers */ - rq->qstate = QCREATED; - rq->pending = 0; - rq->ring->cidx = 0; - rq->ring->pidx = 0; - rq->packets_in = 0; - rq->packets_out = 0; } DELAY(1); /* RSS config */ if (is_rss_enabled(sc)) { rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE); if (rc) goto error; } + DELAY(1); return rc; error: device_printf(sc->dev, "Start RX failed\n"); return rc; } Index: stable/11/sys/dev/oce/oce_sysctl.c 
=================================================================== --- stable/11/sys/dev/oce/oce_sysctl.c (revision 338937) +++ stable/11/sys/dev/oce/oce_sysctl.c (revision 338938) @@ -1,1512 +1,1593 @@ /*- * Copyright (C) 2013 Emulex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include "oce_if.h" static void copy_stats_to_sc_xe201(POCE_SOFTC sc); static void copy_stats_to_sc_be3(POCE_SOFTC sc); static void copy_stats_to_sc_be2(POCE_SOFTC sc); +static void copy_stats_to_sc_sh(POCE_SOFTC sc); static int oce_sysctl_loopback(SYSCTL_HANDLER_ARGS); static int oce_sys_aic_enable(SYSCTL_HANDLER_ARGS); static int oce_be3_fwupgrade(POCE_SOFTC sc, const struct firmware *fw); static int oce_skyhawk_fwupgrade(POCE_SOFTC sc, const struct firmware *fw); static int oce_sys_fwupgrade(SYSCTL_HANDLER_ARGS); static int oce_lancer_fwupgrade(POCE_SOFTC sc, const struct firmware *fw); static int oce_sysctl_sfp_vpd_dump(SYSCTL_HANDLER_ARGS); static boolean_t oce_phy_flashing_required(POCE_SOFTC sc); static boolean_t oce_img_flashing_required(POCE_SOFTC sc, const char *p, int img_optype, uint32_t img_offset, uint32_t img_size, uint32_t hdrs_size); static void oce_add_stats_sysctls_be3(POCE_SOFTC sc, struct sysctl_ctx_list *ctx, struct sysctl_oid *stats_node); static void oce_add_stats_sysctls_xe201(POCE_SOFTC sc, struct sysctl_ctx_list *ctx, struct sysctl_oid *stats_node); extern char component_revision[32]; uint32_t sfp_vpd_dump_buffer[TRANSCEIVER_DATA_NUM_ELE]; struct flash_img_attri { int img_offset; int img_size; int img_type; bool skip_image; int optype; }; void oce_add_sysctls(POCE_SOFTC sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct sysctl_oid *stats_node; SYSCTL_ADD_STRING(ctx, child, OID_AUTO, "component_revision", CTLFLAG_RD, component_revision, sizeof(component_revision), "EMULEX One-Connect device driver 
revision"); SYSCTL_ADD_STRING(ctx, child, OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, sizeof(sc->fw_version), "EMULEX One-Connect Firmware Version"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_rsp_handled", CTLFLAG_RW, &oce_max_rsp_handled, sizeof(oce_max_rsp_handled), "Maximum receive frames handled per interrupt"); if ((sc->function_mode & FNM_FLEX10_MODE) || (sc->function_mode & FNM_UMC_MODE)) SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "speed", CTLFLAG_RD, &sc->qos_link_speed, 0,"QOS Speed"); else SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "speed", CTLFLAG_RD, &sc->speed, 0,"Link Speed"); if (sc->function_mode & FNM_UMC_MODE) SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "pvid", CTLFLAG_RD, &sc->pvid, 0,"PVID"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "loop_back", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, oce_sysctl_loopback, "I", "Loop Back Tests"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fw_upgrade", CTLTYPE_STRING | CTLFLAG_RW, (void *)sc, 0, oce_sys_fwupgrade, "A", "Firmware UFI file"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "aic_enable", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 1, oce_sys_aic_enable, "I", "aic flags"); /* * Dumps Transceiver data * "sysctl dev.oce.0.sfp_vpd_dump=0" * "sysctl -x dev.oce.0.sfp_vpd_dump_buffer" for hex dump * "sysctl -b dev.oce.0.sfp_vpd_dump_buffer > sfp.bin" for binary dump */ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sfp_vpd_dump", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, oce_sysctl_sfp_vpd_dump, "I", "Initiate a sfp_vpd_dump operation"); SYSCTL_ADD_OPAQUE(ctx, child, OID_AUTO, "sfp_vpd_dump_buffer", CTLFLAG_RD, sfp_vpd_dump_buffer, TRANSCEIVER_DATA_SIZE, "IU", "Access sfp_vpd_dump buffer"); stats_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "Ethernet Statistics"); if (IS_BE(sc) || IS_SH(sc)) oce_add_stats_sysctls_be3(sc, ctx, stats_node); else oce_add_stats_sysctls_xe201(sc, ctx, stats_node); } static uint32_t oce_loopback_test(struct oce_softc *sc, uint8_t loopback_type) { uint32_t status = 0; oce_mbox_cmd_set_loopback(sc, sc->port_id, loopback_type, 1); status = oce_mbox_cmd_test_loopback(sc, sc->port_id, loopback_type, 1500, 2, 0xabc); oce_mbox_cmd_set_loopback(sc, sc->port_id, OCE_NO_LOOPBACK, 1); return status; } static int oce_sys_aic_enable(SYSCTL_HANDLER_ARGS) { int value = 0; uint32_t status, vector; POCE_SOFTC sc = (struct oce_softc *)arg1; struct oce_aic_obj *aic; + /* set current value for proper sysctl logging */ + value = sc->aic_obj[0].enable; status = sysctl_handle_int(oidp, &value, 0, req); if (status || !req->newptr) return status; for (vector = 0; vector < sc->intr_count; vector++) { aic = &sc->aic_obj[vector]; if (value == 0){ aic->max_eqd = aic->min_eqd = aic->et_eqd = 0; aic->enable = 0; } else { aic->max_eqd = OCE_MAX_EQD; aic->min_eqd = OCE_MIN_EQD; aic->et_eqd = OCE_MIN_EQD; aic->enable = TRUE; } } return 0; } static int oce_sysctl_loopback(SYSCTL_HANDLER_ARGS) { int value = 0; uint32_t status; struct oce_softc *sc = (struct oce_softc *)arg1; status = sysctl_handle_int(oidp, &value, 0, req); if (status || !req->newptr) return status; if (value != 1) { device_printf(sc->dev, "Not a valid value.
Set to loop_back=1 to run tests\n"); return 0; } if ((status = oce_loopback_test(sc, OCE_MAC_LOOPBACK))) { device_printf(sc->dev, "MAC Loopback Test = Failed (Error status = %d)\n", status); } else device_printf(sc->dev, "MAC Loopback Test = Success\n"); if ((status = oce_loopback_test(sc, OCE_PHY_LOOPBACK))) { device_printf(sc->dev, "PHY Loopback Test = Failed (Error status = %d)\n", status); } else device_printf(sc->dev, "PHY Loopback Test = Success\n"); if ((status = oce_loopback_test(sc, OCE_ONE_PORT_EXT_LOOPBACK))) { device_printf(sc->dev, "EXT Loopback Test = Failed (Error status = %d)\n", status); } else device_printf(sc->dev, "EXT Loopback Test = Success\n"); return 0; } static int oce_sys_fwupgrade(SYSCTL_HANDLER_ARGS) { char ufiname[256] = {0}; uint32_t status = 1; struct oce_softc *sc = (struct oce_softc *)arg1; const struct firmware *fw; status = sysctl_handle_string(oidp, ufiname, sizeof(ufiname), req); if (status || !req->newptr) return status; fw = firmware_get(ufiname); if (fw == NULL) { device_printf(sc->dev, "Unable to get Firmware. " "Make sure %s is copied to /boot/modules\n", ufiname); return ENOENT; } if (IS_BE(sc)) { if ((sc->flags & OCE_FLAGS_BE2)) { device_printf(sc->dev, "Flashing not supported for BE2 yet.\n"); status = 1; goto done; } status = oce_be3_fwupgrade(sc, fw); } else if (IS_SH(sc)) { status = oce_skyhawk_fwupgrade(sc,fw); } else status = oce_lancer_fwupgrade(sc, fw); done: if (status) { device_printf(sc->dev, "Firmware Upgrade failed\n"); } else { device_printf(sc->dev, "Firmware Flashed successfully\n"); } /* Release Firmware*/ firmware_put(fw, FIRMWARE_UNLOAD); return status; } static void oce_fill_flash_img_data(POCE_SOFTC sc, const struct flash_sec_info * fsec, struct flash_img_attri *pimg, int i, const struct firmware *fw, int bin_offset) { if (IS_SH(sc)) { pimg->img_offset = HOST_32(fsec->fsec_entry[i].offset); pimg->img_size = HOST_32(fsec->fsec_entry[i].pad_size); } pimg->img_type = HOST_32(fsec->fsec_entry[i].type); pimg->skip_image = FALSE; switch (pimg->img_type) { case IMG_ISCSI: pimg->optype = 0; if (IS_BE3(sc)) { pimg->img_offset = 2097152; pimg->img_size = 2097152; } break; case IMG_REDBOOT: pimg->optype = 1; if (IS_BE3(sc)) { pimg->img_offset = 262144; pimg->img_size = 1048576; } if (!oce_img_flashing_required(sc, fw->data, pimg->optype, pimg->img_offset, pimg->img_size, bin_offset)) pimg->skip_image = TRUE; break; case IMG_BIOS: pimg->optype = 2; if (IS_BE3(sc)) { pimg->img_offset = 12582912; pimg->img_size = 524288; } break; case IMG_PXEBIOS: pimg->optype = 3; if (IS_BE3(sc)) { pimg->img_offset = 13107200; pimg->img_size = 524288; } break; case IMG_FCOEBIOS: pimg->optype = 8; if (IS_BE3(sc)) { pimg->img_offset = 13631488; pimg->img_size = 524288; } break; case IMG_ISCSI_BAK: pimg->optype = 9; if (IS_BE3(sc)) { pimg->img_offset = 4194304; pimg->img_size = 2097152; } break; case IMG_FCOE: pimg->optype = 10; if (IS_BE3(sc)) { pimg->img_offset = 6291456; pimg->img_size = 2097152; } break; case IMG_FCOE_BAK: pimg->optype = 11; if (IS_BE3(sc)) { pimg->img_offset = 8388608; pimg->img_size = 2097152; } break; case IMG_NCSI: pimg->optype = 13; if (IS_BE3(sc)) { pimg->img_offset = 15990784; pimg->img_size = 262144; } break; case IMG_PHY: pimg->optype = 99; if (IS_BE3(sc)) { pimg->img_offset = 1310720; pimg->img_size = 262144; } if (!oce_phy_flashing_required(sc)) pimg->skip_image = TRUE; break; default: pimg->skip_image = TRUE; break; } } static int oce_sh_be3_flashdata(POCE_SOFTC sc, const struct firmware *fw, int32_t num_imgs) { char 
cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; const char *p = (const char *)fw->data; const struct flash_sec_info *fsec = NULL; struct mbx_common_read_write_flashrom *req; int rc = 0, i, bin_offset = 0, opcode, num_bytes; OCE_DMA_MEM dma_mem; struct flash_img_attri imgatt; /* Validate Cookie */ bin_offset = (sizeof(struct flash_file_hdr) + (num_imgs * sizeof(struct image_hdr))); p += bin_offset; while (p < ((const char *)fw->data + fw->datasize)) { fsec = (const struct flash_sec_info *)p; if (!memcmp(cookie, fsec->cookie, sizeof(cookie))) break; fsec = NULL; p += 32; } if (!fsec) { device_printf(sc->dev, "Invalid Cookie. Firmware image corrupted ?\n"); return EINVAL; } rc = oce_dma_alloc(sc, sizeof(struct mbx_common_read_write_flashrom), &dma_mem, 0); if (rc) { device_printf(sc->dev, "Memory allocation failure while flashing\n"); return ENOMEM; } req = OCE_DMAPTR(&dma_mem, struct mbx_common_read_write_flashrom); if (IS_SH(sc)) num_imgs = HOST_32(fsec->fsec_hdr.num_images); else if (IS_BE3(sc)) num_imgs = MAX_FLASH_COMP; for (i = 0; i < num_imgs; i++) { bzero(&imgatt, sizeof(struct flash_img_attri)); oce_fill_flash_img_data(sc, fsec, &imgatt, i, fw, bin_offset); if (imgatt.skip_image) continue; p = fw->data; p = p + bin_offset + imgatt.img_offset; if ((p + imgatt.img_size) > ((const char *)fw->data + fw->datasize)) { rc = 1; goto ret; } while (imgatt.img_size) { if (imgatt.img_size > 32*1024) num_bytes = 32*1024; else num_bytes = imgatt.img_size; imgatt.img_size -= num_bytes; if (!imgatt.img_size) opcode = FLASHROM_OPER_FLASH; else opcode = FLASHROM_OPER_SAVE; memcpy(req->data_buffer, p, num_bytes); p += num_bytes; rc = oce_mbox_write_flashrom(sc, imgatt.optype, opcode, &dma_mem, num_bytes); if (rc) { device_printf(sc->dev, "cmd to write to flash rom failed.\n"); rc = EIO; goto ret; } /* Leave the CPU for others for some time */ pause("yield", 10); } } ret: oce_dma_free(sc, &dma_mem); return rc; } -#define UFI_TYPE2 2 -#define UFI_TYPE3 3 -#define UFI_TYPE3R 10 -#define UFI_TYPE4 4 -#define UFI_TYPE4R 11 +#define UFI_TYPE2 2 +#define UFI_TYPE3 3 +#define UFI_TYPE3R 10 +#define UFI_TYPE4 4 +#define UFI_TYPE4R 11 static int oce_get_ufi_type(POCE_SOFTC sc, - const struct flash_file_hdr *fhdr) + const struct flash_file_hdr *fhdr) { - if (fhdr == NULL) - goto be_get_ufi_exit; + if (fhdr == NULL) + goto be_get_ufi_exit; - if (IS_SH(sc) && fhdr->build[0] == '4') { - if (fhdr->asic_type_rev >= 0x10) - return UFI_TYPE4R; - else - return UFI_TYPE4; - } else if (IS_BE3(sc) && fhdr->build[0] == '3') { - if (fhdr->asic_type_rev == 0x10) - return UFI_TYPE3R; - else - return UFI_TYPE3; - } else if (IS_BE2(sc) && fhdr->build[0] == '2') - return UFI_TYPE2; + if (IS_SH(sc) && fhdr->build[0] == '4') { + if (fhdr->asic_type_rev >= 0x10) + return UFI_TYPE4R; + else + return UFI_TYPE4; + } else if (IS_BE3(sc) && fhdr->build[0] == '3') { + if (fhdr->asic_type_rev == 0x10) + return UFI_TYPE3R; + else + return UFI_TYPE3; + } else if (IS_BE2(sc) && fhdr->build[0] == '2') + return UFI_TYPE2; be_get_ufi_exit: - device_printf(sc->dev, - "UFI and Interface are not compatible for flashing\n"); - return -1; + device_printf(sc->dev, + "UFI and Interface are not compatible for flashing\n"); + return -1; } static int oce_skyhawk_fwupgrade(POCE_SOFTC sc, const struct firmware *fw) { int rc = 0, num_imgs = 0, i = 0, ufi_type; const struct flash_file_hdr *fhdr; const struct image_hdr *img_ptr; fhdr = (const struct flash_file_hdr *)fw->data; ufi_type = oce_get_ufi_type(sc, fhdr); /* Display flash version */ 
device_printf(sc->dev, "Flashing Firmware %s\n", &fhdr->build[2]); num_imgs = fhdr->num_imgs; for (i = 0; i < num_imgs; i++) { img_ptr = (const struct image_hdr *)((const char *)fw->data + sizeof(struct flash_file_hdr) + (i * sizeof(struct image_hdr))); if (img_ptr->imageid != 1) continue; switch (ufi_type) { case UFI_TYPE4R: rc = oce_sh_be3_flashdata(sc, fw, num_imgs); break; case UFI_TYPE4: if (sc->asic_revision < 0x10) rc = oce_sh_be3_flashdata(sc, fw, num_imgs); else { rc = -1; device_printf(sc->dev, "Can't load SH A0 UFI on B0\n"); } break; default: rc = -1; break; } } return rc; } static int oce_be3_fwupgrade(POCE_SOFTC sc, const struct firmware *fw) { int rc = 0, num_imgs = 0, i = 0; const struct flash_file_hdr *fhdr; const struct image_hdr *img_ptr; fhdr = (const struct flash_file_hdr *)fw->data; if (fhdr->build[0] != '3') { device_printf(sc->dev, "Invalid BE3 firmware image\n"); return EINVAL; } /* Display flash version */ device_printf(sc->dev, "Flashing Firmware %s\n", &fhdr->build[2]); num_imgs = fhdr->num_imgs; for (i = 0; i < num_imgs; i++) { img_ptr = (const struct image_hdr *)((const char *)fw->data + sizeof(struct flash_file_hdr) + (i * sizeof(struct image_hdr))); if (img_ptr->imageid == 1) { rc = oce_sh_be3_flashdata(sc, fw, num_imgs); break; } } return rc; } static boolean_t oce_phy_flashing_required(POCE_SOFTC sc) { int status = 0; struct oce_phy_info phy_info; status = oce_mbox_get_phy_info(sc, &phy_info); if (status) return FALSE; if ((phy_info.phy_type == TN_8022) && (phy_info.interface_type == PHY_TYPE_BASET_10GB)) { return TRUE; } return FALSE; } static boolean_t oce_img_flashing_required(POCE_SOFTC sc, const char *p, int img_optype, uint32_t img_offset, uint32_t img_size, uint32_t hdrs_size) { uint32_t crc_offset; uint8_t flashed_crc[4]; int status; crc_offset = hdrs_size + img_offset + img_size - 4; p += crc_offset; status = oce_mbox_get_flashrom_crc(sc, flashed_crc, (img_size - 4), img_optype); if (status) return TRUE; /* Something wrong. Reflash. */ /* update redboot only if crc does not match */ if (bcmp(flashed_crc, p, 4)) return TRUE; else return FALSE; } static int oce_lancer_fwupgrade(POCE_SOFTC sc, const struct firmware *fw) { int rc = 0; OCE_DMA_MEM dma_mem; const uint8_t *data = NULL; uint8_t *dest_image_ptr = NULL; size_t size = 0; uint32_t data_written = 0, chunk_size = 0; uint32_t offset = 0, add_status = 0; if (!IS_ALIGNED(fw->datasize, sizeof(uint32_t))) { device_printf(sc->dev, "Lancer FW image is not 4-byte aligned."); return EINVAL; } rc = oce_dma_alloc(sc, 32*1024, &dma_mem, 0); if (rc) { device_printf(sc->dev, "Memory allocation failure while flashing Lancer\n"); return ENOMEM; } size = fw->datasize; data = fw->data; dest_image_ptr = OCE_DMAPTR(&dma_mem, uint8_t); while (size) { chunk_size = MIN(size, (32*1024)); bcopy(data, dest_image_ptr, chunk_size); rc = oce_mbox_lancer_write_flashrom(sc, chunk_size, offset, &dma_mem, &data_written, &add_status); if (rc) break; size -= data_written; data += data_written; offset += data_written; pause("yield", 10); } if (!rc) /* Commit the firmware */ rc = oce_mbox_lancer_write_flashrom(sc, 0, offset, &dma_mem, &data_written, &add_status); if (rc) { device_printf(sc->dev, "Lancer firmware load error.
" "Addstatus = 0x%x, status = %d \n", add_status, rc); rc = EIO; } oce_dma_free(sc, &dma_mem); return rc; } static void oce_add_stats_sysctls_be3(POCE_SOFTC sc, struct sysctl_ctx_list *ctx, struct sysctl_oid *stats_node) { struct sysctl_oid *rx_stats_node, *tx_stats_node; struct sysctl_oid_list *rx_stat_list, *tx_stat_list; struct sysctl_oid_list *queue_stats_list; struct sysctl_oid *queue_stats_node; struct oce_drv_stats *stats; char prefix[32]; int i; stats = &sc->oce_stats_info; rx_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats_node), OID_AUTO,"rx", CTLFLAG_RD, NULL, "RX Ethernet Statistics"); rx_stat_list = SYSCTL_CHILDREN(rx_stats_node); SYSCTL_ADD_QUAD(ctx, rx_stat_list, OID_AUTO, "total_pkts", CTLFLAG_RD, &stats->rx.t_rx_pkts, "Total Received Packets"); SYSCTL_ADD_QUAD(ctx, rx_stat_list, OID_AUTO, "total_bytes", CTLFLAG_RD, &stats->rx.t_rx_bytes, "Total Received Bytes"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_frags", CTLFLAG_RD, &stats->rx.t_rx_frags, 0, "Total Received Fragements"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_mcast_pkts", CTLFLAG_RD, &stats->rx.t_rx_mcast_pkts, 0, "Total Received Multicast Packets"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_ucast_pkts", CTLFLAG_RD, &stats->rx.t_rx_ucast_pkts, 0, "Total Received Unicast Packets"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_rxcp_errs", CTLFLAG_RD, &stats->rx.t_rxcp_errs, 0, "Total Receive completion errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "pause_frames", CTLFLAG_RD, &stats->u0.be.rx_pause_frames, 0, "Pause Frames"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "priority_pause_frames", CTLFLAG_RD, &stats->u0.be.rx_priority_pause_frames, 0, "Priority Pause Frames"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "control_frames", CTLFLAG_RD, &stats->u0.be.rx_control_frames, 0, "Control Frames"); for (i = 0; i < sc->nrqs; i++) { sprintf(prefix, "queue%d",i); queue_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(rx_stats_node), OID_AUTO, prefix, CTLFLAG_RD, NULL, "Queue name"); queue_stats_list = SYSCTL_CHILDREN(queue_stats_node); SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_pkts", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_pkts, "Receive Packets"); SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_bytes, "Recived Bytes"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_frags", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_frags, 0, "Received Fragments"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_mcast_pkts", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_mcast_pkts, 0, "Received Multicast Packets"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_ucast_pkts", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_ucast_pkts, 0, "Received Unicast Packets"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rxcp_err", CTLFLAG_RD, &sc->rq[i]->rx_stats.rxcp_err, 0, "Received Completion Errors"); - + if(IS_SH(sc)) { + SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_drops_no_frags", + CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_drops_no_frags, 0, + "num of packet drops due to no fragments"); + } } rx_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(rx_stats_node), OID_AUTO, "err", CTLFLAG_RD, NULL, "Receive Error Stats"); rx_stat_list = SYSCTL_CHILDREN(rx_stats_node); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &stats->u0.be.rx_crc_errors, 0, "CRC Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "pbuf_errors", CTLFLAG_RD, &stats->u0.be.rx_drops_no_pbuf, 0, "Drops due to pbuf full"); SYSCTL_ADD_UINT(ctx, 
rx_stat_list, OID_AUTO, "erx_errors", CTLFLAG_RD, &stats->u0.be.rx_drops_no_erx_descr, 0, "ERX Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "alignment_errors", CTLFLAG_RD, &stats->u0.be.rx_drops_too_many_frags, 0, "RX Alignment Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "in_range_errors", CTLFLAG_RD, &stats->u0.be.rx_in_range_errors, 0, "In Range Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "out_range_errors", CTLFLAG_RD, &stats->u0.be.rx_out_range_errors, 0, "Out Range Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "frame_too_long", CTLFLAG_RD, &stats->u0.be.rx_frame_too_long, 0, "Frame Too Long"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "address_match_errors", CTLFLAG_RD, &stats->u0.be.rx_address_match_errors, 0, "Address Match Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_too_small", CTLFLAG_RD, &stats->u0.be.rx_dropped_too_small, 0, "Dropped Too Small"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_too_short", CTLFLAG_RD, &stats->u0.be.rx_dropped_too_short, 0, "Dropped Too Short"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_header_too_small", CTLFLAG_RD, &stats->u0.be.rx_dropped_header_too_small, 0, "Dropped Header Too Small"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_tcp_length", CTLFLAG_RD, &stats->u0.be.rx_dropped_tcp_length, 0, "Dropped TCP Length"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_runt", CTLFLAG_RD, &stats->u0.be.rx_dropped_runt, 0, "Dropped runt"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "ip_checksum_errs", CTLFLAG_RD, &stats->u0.be.rx_ip_checksum_errs, 0, "IP Checksum Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "tcp_checksum_errs", CTLFLAG_RD, &stats->u0.be.rx_tcp_checksum_errs, 0, "TCP Checksum Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "udp_checksum_errs", CTLFLAG_RD, &stats->u0.be.rx_udp_checksum_errs, 0, "UDP Checksum Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "fifo_overflow_drop", CTLFLAG_RD, &stats->u0.be.rxpp_fifo_overflow_drop, 0, "FIFO Overflow Drop"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "input_fifo_overflow_drop", CTLFLAG_RD, &stats->u0.be.rx_input_fifo_overflow_drop, 0, "Input FIFO Overflow Drop"); tx_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats_node), OID_AUTO, "tx",CTLFLAG_RD, NULL, "TX Ethernet Statistics"); tx_stat_list = SYSCTL_CHILDREN(tx_stats_node); SYSCTL_ADD_QUAD(ctx, tx_stat_list, OID_AUTO, "total_tx_pkts", CTLFLAG_RD, &stats->tx.t_tx_pkts, "Total Transmit Packets"); SYSCTL_ADD_QUAD(ctx, tx_stat_list, OID_AUTO, "total_tx_bytes", CTLFLAG_RD, &stats->tx.t_tx_bytes, "Total Transmit Bytes"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_reqs", CTLFLAG_RD, &stats->tx.t_tx_reqs, 0, "Total Transmit Requests"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_stops", CTLFLAG_RD, &stats->tx.t_tx_stops, 0, "Total Transmit Stops"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_wrbs", CTLFLAG_RD, &stats->tx.t_tx_wrbs, 0, "Total Transmit WRB's"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_compl", CTLFLAG_RD, &stats->tx.t_tx_compl, 0, "Total Transmit Completions"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_ipv6_ext_hdr_tx_drop", CTLFLAG_RD, &stats->tx.t_ipv6_ext_hdr_tx_drop, 0, "Total Transmit IPV6 Drops"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "pauseframes", CTLFLAG_RD, &stats->u0.be.tx_pauseframes, 0, "Pause Frames"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "priority_pauseframes", CTLFLAG_RD, &stats->u0.be.tx_priority_pauseframes, 0,
"Priority Pauseframes"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "controlframes", CTLFLAG_RD, &stats->u0.be.tx_controlframes, 0, "Tx Control Frames"); for (i = 0; i < sc->nwqs; i++) { sprintf(prefix, "queue%d",i); queue_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tx_stats_node), OID_AUTO, prefix, CTLFLAG_RD, NULL, "Queue name"); queue_stats_list = SYSCTL_CHILDREN(queue_stats_node); SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "tx_pkts", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_pkts, "Transmit Packets"); SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_bytes, "Transmit Bytes"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_reqs", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_reqs, 0, "Transmit Requests"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_stops", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_stops, 0, "Transmit Stops"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_wrbs", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_wrbs, 0, "Transmit WRB's"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_compl", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_compl, 0, "Transmit Completions"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "ipv6_ext_hdr_tx_drop",CTLFLAG_RD, &sc->wq[i]->tx_stats.ipv6_ext_hdr_tx_drop, 0, "Transmit IPV6 Ext Header Drop"); } return; } static void oce_add_stats_sysctls_xe201(POCE_SOFTC sc, struct sysctl_ctx_list *ctx, struct sysctl_oid *stats_node) { struct sysctl_oid *rx_stats_node, *tx_stats_node; struct sysctl_oid_list *rx_stat_list, *tx_stat_list; struct sysctl_oid_list *queue_stats_list; struct sysctl_oid *queue_stats_node; struct oce_drv_stats *stats; char prefix[32]; int i; stats = &sc->oce_stats_info; rx_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats_node), OID_AUTO, "rx", CTLFLAG_RD, NULL, "RX Ethernet Statistics"); rx_stat_list = SYSCTL_CHILDREN(rx_stats_node); SYSCTL_ADD_QUAD(ctx, rx_stat_list, OID_AUTO, "total_pkts", CTLFLAG_RD, &stats->rx.t_rx_pkts, "Total Received Packets"); SYSCTL_ADD_QUAD(ctx, rx_stat_list, OID_AUTO, "total_bytes", CTLFLAG_RD, &stats->rx.t_rx_bytes, "Total Received Bytes"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_frags", CTLFLAG_RD, &stats->rx.t_rx_frags, 0, "Total Received Fragements"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_mcast_pkts", CTLFLAG_RD, &stats->rx.t_rx_mcast_pkts, 0, "Total Received Multicast Packets"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_ucast_pkts", CTLFLAG_RD, &stats->rx.t_rx_ucast_pkts, 0, "Total Received Unicast Packets"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "total_rxcp_errs", CTLFLAG_RD, &stats->rx.t_rxcp_errs, 0, "Total Receive completion errors"); SYSCTL_ADD_UQUAD(ctx, rx_stat_list, OID_AUTO, "pause_frames", CTLFLAG_RD, &stats->u0.xe201.rx_pause_frames, "Pause Frames"); SYSCTL_ADD_UQUAD(ctx, rx_stat_list, OID_AUTO, "control_frames", CTLFLAG_RD, &stats->u0.xe201.rx_control_frames, "Control Frames"); for (i = 0; i < sc->nrqs; i++) { sprintf(prefix, "queue%d",i); queue_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(rx_stats_node), OID_AUTO, prefix, CTLFLAG_RD, NULL, "Queue name"); queue_stats_list = SYSCTL_CHILDREN(queue_stats_node); SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_pkts", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_pkts, "Receive Packets"); SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_bytes, "Recived Bytes"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_frags", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_frags, 0, "Received Fragments"); 
SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_mcast_pkts", CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_mcast_pkts, 0, "Received Multicast Packets"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_ucast_pkts",CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_ucast_pkts, 0, "Received Unicast Packets"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rxcp_err", CTLFLAG_RD, &sc->rq[i]->rx_stats.rxcp_err, 0, "Received Completion Errors"); } rx_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(rx_stats_node), OID_AUTO, "err", CTLFLAG_RD, NULL, "Receive Error Stats"); rx_stat_list = SYSCTL_CHILDREN(rx_stats_node); SYSCTL_ADD_UQUAD(ctx, rx_stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &stats->u0.xe201.rx_crc_errors, "CRC Errors"); SYSCTL_ADD_UQUAD(ctx, rx_stat_list, OID_AUTO, "alignment_errors", CTLFLAG_RD, &stats->u0.xe201.rx_alignment_errors, "RX Alignment Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "in_range_errors", CTLFLAG_RD, &stats->u0.xe201.rx_in_range_errors, 0, "In Range Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "out_range_errors", CTLFLAG_RD, &stats->u0.xe201.rx_out_of_range_errors, 0, "Out Range Errors"); SYSCTL_ADD_UQUAD(ctx, rx_stat_list, OID_AUTO, "frame_too_long", CTLFLAG_RD, &stats->u0.xe201.rx_frames_too_long, "Frame Too Long"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "address_match_errors", CTLFLAG_RD, &stats->u0.xe201.rx_address_match_errors, 0, "Address Match Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_too_small", CTLFLAG_RD, &stats->u0.xe201.rx_dropped_too_small, 0, "Dropped Too Small"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_too_short", CTLFLAG_RD, &stats->u0.xe201.rx_dropped_too_short, 0, "Dropped Too Short"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_header_too_small", CTLFLAG_RD, &stats->u0.xe201.rx_dropped_header_too_small, 0, "Dropped Header Too Small"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_tcp_length", CTLFLAG_RD, &stats->u0.xe201.rx_dropped_invalid_tcp_length, 0, "Dropped TCP Length"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "dropped_runt", CTLFLAG_RD, &stats->u0.xe201.rx_dropped_runt, 0, "Dropped runt"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "ip_checksum_errs", CTLFLAG_RD, &stats->u0.xe201.rx_ip_checksum_errors, 0, "IP Checksum Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "tcp_checksum_errs", CTLFLAG_RD, &stats->u0.xe201.rx_tcp_checksum_errors, 0, "TCP Checksum Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "udp_checksum_errs", CTLFLAG_RD, &stats->u0.xe201.rx_udp_checksum_errors, 0, "UDP Checksum Errors"); SYSCTL_ADD_UINT(ctx, rx_stat_list, OID_AUTO, "input_fifo_overflow_drop", CTLFLAG_RD, &stats->u0.xe201.rx_fifo_overflow, 0, "Input FIFO Overflow Drop"); tx_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats_node), OID_AUTO, "tx", CTLFLAG_RD, NULL, "TX Ethernet Statistics"); tx_stat_list = SYSCTL_CHILDREN(tx_stats_node); SYSCTL_ADD_QUAD(ctx, tx_stat_list, OID_AUTO, "total_tx_pkts", CTLFLAG_RD, &stats->tx.t_tx_pkts, "Total Transmit Packets"); SYSCTL_ADD_QUAD(ctx, tx_stat_list, OID_AUTO, "total_tx_bytes", CTLFLAG_RD, &stats->tx.t_tx_bytes, "Total Transmit Bytes"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_reqs", CTLFLAG_RD, &stats->tx.t_tx_reqs, 0, "Total Transmit Requests"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_stops", CTLFLAG_RD, &stats->tx.t_tx_stops, 0, "Total Transmit Stops"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_wrbs", CTLFLAG_RD, &stats->tx.t_tx_wrbs, 0, "Total Transmit WRB's");
SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_tx_compl", CTLFLAG_RD, &stats->tx.t_tx_compl, 0, "Total Transmit Completions"); SYSCTL_ADD_UINT(ctx, tx_stat_list, OID_AUTO, "total_ipv6_ext_hdr_tx_drop", CTLFLAG_RD, &stats->tx.t_ipv6_ext_hdr_tx_drop, 0, "Total Transmit IPV6 Drops"); SYSCTL_ADD_UQUAD(ctx, tx_stat_list, OID_AUTO, "pauseframes", CTLFLAG_RD, &stats->u0.xe201.tx_pause_frames, "Pause Frames"); SYSCTL_ADD_UQUAD(ctx, tx_stat_list, OID_AUTO, "controlframes", CTLFLAG_RD, &stats->u0.xe201.tx_control_frames, "Tx Control Frames"); for (i = 0; i < sc->nwqs; i++) { sprintf(prefix, "queue%d",i); queue_stats_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tx_stats_node), OID_AUTO, prefix, CTLFLAG_RD, NULL, "Queue name"); queue_stats_list = SYSCTL_CHILDREN(queue_stats_node); SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "tx_pkts", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_pkts, "Transmit Packets"); SYSCTL_ADD_QUAD(ctx, queue_stats_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_bytes, "Transmit Bytes"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_reqs", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_reqs, 0, "Transmit Requests"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_stops", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_stops, 0, "Transmit Stops"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_wrbs", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_wrbs, 0, "Transmit WRB's"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "tx_compl", CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_compl, 0, "Transmit Completions"); SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "ipv6_ext_hdr_tx_drop", CTLFLAG_RD, &sc->wq[i]->tx_stats.ipv6_ext_hdr_tx_drop, 0, "Transmit IPV6 Ext Header Drop"); } return; } void oce_refresh_queue_stats(POCE_SOFTC sc) { struct oce_drv_stats *adapter_stats; int i; adapter_stats = &sc->oce_stats_info; /* Calculate total RX and TX stats from all queues */ bzero(&adapter_stats->rx, sizeof(struct oce_rx_stats)); for (i = 0; i < sc->nrqs; i++) { adapter_stats->rx.t_rx_pkts += sc->rq[i]->rx_stats.rx_pkts; adapter_stats->rx.t_rx_bytes += sc->rq[i]->rx_stats.rx_bytes; adapter_stats->rx.t_rx_frags += sc->rq[i]->rx_stats.rx_frags; adapter_stats->rx.t_rx_mcast_pkts += sc->rq[i]->rx_stats.rx_mcast_pkts; adapter_stats->rx.t_rx_ucast_pkts += sc->rq[i]->rx_stats.rx_ucast_pkts; adapter_stats->rx.t_rxcp_errs += sc->rq[i]->rx_stats.rxcp_err; } bzero(&adapter_stats->tx, sizeof(struct oce_tx_stats)); for (i = 0; i < sc->nwqs; i++) { adapter_stats->tx.t_tx_reqs += sc->wq[i]->tx_stats.tx_reqs; adapter_stats->tx.t_tx_stops += sc->wq[i]->tx_stats.tx_stops; adapter_stats->tx.t_tx_wrbs += sc->wq[i]->tx_stats.tx_wrbs; adapter_stats->tx.t_tx_compl += sc->wq[i]->tx_stats.tx_compl; adapter_stats->tx.t_tx_bytes += sc->wq[i]->tx_stats.tx_bytes; adapter_stats->tx.t_tx_pkts += sc->wq[i]->tx_stats.tx_pkts; adapter_stats->tx.t_ipv6_ext_hdr_tx_drop += sc->wq[i]->tx_stats.ipv6_ext_hdr_tx_drop; } } static void copy_stats_to_sc_xe201(POCE_SOFTC sc) { struct oce_xe201_stats *adapter_stats; struct mbx_get_pport_stats *nic_mbx; struct pport_stats *port_stats; nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_pport_stats); port_stats = &nic_mbx->params.rsp.pps; adapter_stats = &sc->oce_stats_info.u0.xe201; adapter_stats->tx_pkts = port_stats->tx_pkts; adapter_stats->tx_unicast_pkts = port_stats->tx_unicast_pkts; adapter_stats->tx_multicast_pkts = port_stats->tx_multicast_pkts; adapter_stats->tx_broadcast_pkts = port_stats->tx_broadcast_pkts; adapter_stats->tx_bytes = port_stats->tx_bytes;
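/* * The assignments that follow mirror the GET_PPORT_STATS response into the * driver's xe201 stats block one field at a time; as far as this code shows, * no accumulation or unit conversion is applied, so each refresh simply * replaces the previous snapshot. */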
adapter_stats->tx_unicast_bytes = port_stats->tx_unicast_bytes; adapter_stats->tx_multicast_bytes = port_stats->tx_multicast_bytes; adapter_stats->tx_broadcast_bytes = port_stats->tx_broadcast_bytes; adapter_stats->tx_discards = port_stats->tx_discards; adapter_stats->tx_errors = port_stats->tx_errors; adapter_stats->tx_pause_frames = port_stats->tx_pause_frames; adapter_stats->tx_pause_on_frames = port_stats->tx_pause_on_frames; adapter_stats->tx_pause_off_frames = port_stats->tx_pause_off_frames; adapter_stats->tx_internal_mac_errors = port_stats->tx_internal_mac_errors; adapter_stats->tx_control_frames = port_stats->tx_control_frames; adapter_stats->tx_pkts_64_bytes = port_stats->tx_pkts_64_bytes; adapter_stats->tx_pkts_65_to_127_bytes = port_stats->tx_pkts_65_to_127_bytes; adapter_stats->tx_pkts_128_to_255_bytes = port_stats->tx_pkts_128_to_255_bytes; adapter_stats->tx_pkts_256_to_511_bytes = port_stats->tx_pkts_256_to_511_bytes; adapter_stats->tx_pkts_512_to_1023_bytes = port_stats->tx_pkts_512_to_1023_bytes; adapter_stats->tx_pkts_1024_to_1518_bytes = port_stats->tx_pkts_1024_to_1518_bytes; adapter_stats->tx_pkts_1519_to_2047_bytes = port_stats->tx_pkts_1519_to_2047_bytes; adapter_stats->tx_pkts_2048_to_4095_bytes = port_stats->tx_pkts_2048_to_4095_bytes; adapter_stats->tx_pkts_4096_to_8191_bytes = port_stats->tx_pkts_4096_to_8191_bytes; adapter_stats->tx_pkts_8192_to_9216_bytes = port_stats->tx_pkts_8192_to_9216_bytes; adapter_stats->tx_lso_pkts = port_stats->tx_lso_pkts; adapter_stats->rx_pkts = port_stats->rx_pkts; adapter_stats->rx_unicast_pkts = port_stats->rx_unicast_pkts; adapter_stats->rx_multicast_pkts = port_stats->rx_multicast_pkts; adapter_stats->rx_broadcast_pkts = port_stats->rx_broadcast_pkts; adapter_stats->rx_bytes = port_stats->rx_bytes; adapter_stats->rx_unicast_bytes = port_stats->rx_unicast_bytes; adapter_stats->rx_multicast_bytes = port_stats->rx_multicast_bytes; adapter_stats->rx_broadcast_bytes = port_stats->rx_broadcast_bytes; adapter_stats->rx_unknown_protos = port_stats->rx_unknown_protos; adapter_stats->rx_discards = port_stats->rx_discards; adapter_stats->rx_errors = port_stats->rx_errors; adapter_stats->rx_crc_errors = port_stats->rx_crc_errors; adapter_stats->rx_alignment_errors = port_stats->rx_alignment_errors; adapter_stats->rx_symbol_errors = port_stats->rx_symbol_errors; adapter_stats->rx_pause_frames = port_stats->rx_pause_frames; adapter_stats->rx_pause_on_frames = port_stats->rx_pause_on_frames; adapter_stats->rx_pause_off_frames = port_stats->rx_pause_off_frames; adapter_stats->rx_frames_too_long = port_stats->rx_frames_too_long; adapter_stats->rx_internal_mac_errors = port_stats->rx_internal_mac_errors; adapter_stats->rx_undersize_pkts = port_stats->rx_undersize_pkts; adapter_stats->rx_oversize_pkts = port_stats->rx_oversize_pkts; adapter_stats->rx_fragment_pkts = port_stats->rx_fragment_pkts; adapter_stats->rx_jabbers = port_stats->rx_jabbers; adapter_stats->rx_control_frames = port_stats->rx_control_frames; adapter_stats->rx_control_frames_unknown_opcode = port_stats->rx_control_frames_unknown_opcode; adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors; adapter_stats->rx_out_of_range_errors = port_stats->rx_out_of_range_errors; adapter_stats->rx_address_match_errors = port_stats->rx_address_match_errors; adapter_stats->rx_vlan_mismatch_errors = port_stats->rx_vlan_mismatch_errors; adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small; adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short; 
adapter_stats->rx_dropped_header_too_small = port_stats->rx_dropped_header_too_small; adapter_stats->rx_dropped_invalid_tcp_length = port_stats->rx_dropped_invalid_tcp_length; adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt; adapter_stats->rx_ip_checksum_errors = port_stats->rx_ip_checksum_errors; adapter_stats->rx_tcp_checksum_errors = port_stats->rx_tcp_checksum_errors; adapter_stats->rx_udp_checksum_errors = port_stats->rx_udp_checksum_errors; adapter_stats->rx_non_rss_pkts = port_stats->rx_non_rss_pkts; adapter_stats->rx_ipv4_pkts = port_stats->rx_ipv4_pkts; adapter_stats->rx_ipv6_pkts = port_stats->rx_ipv6_pkts; adapter_stats->rx_ipv4_bytes = port_stats->rx_ipv4_bytes; adapter_stats->rx_ipv6_bytes = port_stats->rx_ipv6_bytes; adapter_stats->rx_nic_pkts = port_stats->rx_nic_pkts; adapter_stats->rx_tcp_pkts = port_stats->rx_tcp_pkts; adapter_stats->rx_iscsi_pkts = port_stats->rx_iscsi_pkts; adapter_stats->rx_management_pkts = port_stats->rx_management_pkts; adapter_stats->rx_switched_unicast_pkts = port_stats->rx_switched_unicast_pkts; adapter_stats->rx_switched_multicast_pkts = port_stats->rx_switched_multicast_pkts; adapter_stats->rx_switched_broadcast_pkts = port_stats->rx_switched_broadcast_pkts; adapter_stats->num_forwards = port_stats->num_forwards; adapter_stats->rx_fifo_overflow = port_stats->rx_fifo_overflow; adapter_stats->rx_input_fifo_overflow = port_stats->rx_input_fifo_overflow; adapter_stats->rx_drops_too_many_frags = port_stats->rx_drops_too_many_frags; adapter_stats->rx_drops_invalid_queue = port_stats->rx_drops_invalid_queue; adapter_stats->rx_drops_mtu = port_stats->rx_drops_mtu; adapter_stats->rx_pkts_64_bytes = port_stats->rx_pkts_64_bytes; adapter_stats->rx_pkts_65_to_127_bytes = port_stats->rx_pkts_65_to_127_bytes; adapter_stats->rx_pkts_128_to_255_bytes = port_stats->rx_pkts_128_to_255_bytes; adapter_stats->rx_pkts_256_to_511_bytes = port_stats->rx_pkts_256_to_511_bytes; adapter_stats->rx_pkts_512_to_1023_bytes = port_stats->rx_pkts_512_to_1023_bytes; adapter_stats->rx_pkts_1024_to_1518_bytes = port_stats->rx_pkts_1024_to_1518_bytes; adapter_stats->rx_pkts_1519_to_2047_bytes = port_stats->rx_pkts_1519_to_2047_bytes; adapter_stats->rx_pkts_2048_to_4095_bytes = port_stats->rx_pkts_2048_to_4095_bytes; adapter_stats->rx_pkts_4096_to_8191_bytes = port_stats->rx_pkts_4096_to_8191_bytes; adapter_stats->rx_pkts_8192_to_9216_bytes = port_stats->rx_pkts_8192_to_9216_bytes; } static void copy_stats_to_sc_be2(POCE_SOFTC sc) { struct oce_be_stats *adapter_stats; struct oce_pmem_stats *pmem; struct oce_rxf_stats_v0 *rxf_stats; struct oce_port_rxf_stats_v0 *port_stats; struct mbx_get_nic_stats_v0 *nic_mbx; uint32_t port = sc->port_id; nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v0); pmem = &nic_mbx->params.rsp.stats.pmem; rxf_stats = &nic_mbx->params.rsp.stats.rxf; port_stats = &nic_mbx->params.rsp.stats.rxf.port[port]; adapter_stats = &sc->oce_stats_info.u0.be; /* Update stats */ adapter_stats->rx_pause_frames = port_stats->rx_pause_frames; adapter_stats->rx_crc_errors = port_stats->rx_crc_errors; adapter_stats->rx_control_frames = port_stats->rx_control_frames; adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors; adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long; adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt; adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; adapter_stats->rx_udp_checksum_errs = 
port_stats->rx_udp_checksum_errs; adapter_stats->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; adapter_stats->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small; adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short; adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors; adapter_stats->rx_dropped_header_too_small = port_stats->rx_dropped_header_too_small; adapter_stats->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow_drop; adapter_stats->rx_address_match_errors = port_stats->rx_address_match_errors; adapter_stats->rx_alignment_symbol_errors = port_stats->rx_alignment_symbol_errors; adapter_stats->tx_pauseframes = port_stats->tx_pauseframes; adapter_stats->tx_controlframes = port_stats->tx_controlframes; if (sc->if_id) adapter_stats->jabber_events = rxf_stats->port1_jabber_events; else adapter_stats->jabber_events = rxf_stats->port0_jabber_events; adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb; adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; adapter_stats->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring; adapter_stats->forwarded_packets = rxf_stats->forwarded_packets; adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu; adapter_stats->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; adapter_stats->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; adapter_stats->eth_red_drops = pmem->eth_red_drops; } static void copy_stats_to_sc_be3(POCE_SOFTC sc) { struct oce_be_stats *adapter_stats; struct oce_pmem_stats *pmem; struct oce_rxf_stats_v1 *rxf_stats; struct oce_port_rxf_stats_v1 *port_stats; - struct mbx_get_nic_stats *nic_mbx; + struct mbx_get_nic_stats_v1 *nic_mbx; uint32_t port = sc->port_id; - nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats); + nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v1); pmem = &nic_mbx->params.rsp.stats.pmem; rxf_stats = &nic_mbx->params.rsp.stats.rxf; port_stats = &nic_mbx->params.rsp.stats.rxf.port[port]; adapter_stats = &sc->oce_stats_info.u0.be; /* Update stats */ adapter_stats->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; adapter_stats->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; adapter_stats->rx_pause_frames = port_stats->rx_pause_frames; adapter_stats->rx_crc_errors = port_stats->rx_crc_errors; adapter_stats->rx_control_frames = port_stats->rx_control_frames; adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors; adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long; adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt; adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; adapter_stats->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; adapter_stats->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small; adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short; adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors; adapter_stats->rx_dropped_header_too_small = port_stats->rx_dropped_header_too_small; adapter_stats->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow_drop; adapter_stats->rx_address_match_errors = port_stats->rx_address_match_errors; 
	adapter_stats->rx_alignment_symbol_errors =
	    port_stats->rx_alignment_symbol_errors;
	adapter_stats->rxpp_fifo_overflow_drop =
	    port_stats->rxpp_fifo_overflow_drop;
	adapter_stats->tx_pauseframes = port_stats->tx_pauseframes;
	adapter_stats->tx_controlframes = port_stats->tx_controlframes;
	adapter_stats->jabber_events = port_stats->jabber_events;

	adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	adapter_stats->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	adapter_stats->forwarded_packets = rxf_stats->forwarded_packets;
	adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	adapter_stats->rx_drops_no_tpre_descr =
	    rxf_stats->rx_drops_no_tpre_descr;
	adapter_stats->rx_drops_too_many_frags =
	    rxf_stats->rx_drops_too_many_frags;

	adapter_stats->eth_red_drops = pmem->eth_red_drops;
}

+/**
+ * @brief Copy v2 (Skyhawk) statistics from the stats mailbox to the softc
+ * @param sc software handle to the device
+ */
+static void
+copy_stats_to_sc_sh(POCE_SOFTC sc)
+{
+	struct oce_be_stats *adapter_stats;
+	struct oce_pmem_stats *pmem;
+	struct oce_rxf_stats_v2 *rxf_stats;
+	struct oce_port_rxf_stats_v2 *port_stats;
+	struct mbx_get_nic_stats_v2 *nic_mbx;
+	struct oce_erx_stats_v2 *erx_stats;
+	uint32_t port = sc->port_id;
+
+	nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v2);
+	pmem = &nic_mbx->params.rsp.stats.pmem;
+	rxf_stats = &nic_mbx->params.rsp.stats.rxf;
+	erx_stats = &nic_mbx->params.rsp.stats.erx;
+	port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
+
+	adapter_stats = &sc->oce_stats_info.u0.be;
+
+	/* Update stats */
+	adapter_stats->pmem_fifo_overflow_drop =
+	    port_stats->pmem_fifo_overflow_drop;
+	adapter_stats->rx_priority_pause_frames =
+	    port_stats->rx_priority_pause_frames;
+	adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
+	adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
+	adapter_stats->rx_control_frames = port_stats->rx_control_frames;
+	adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
+	adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long;
+	adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
+	adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+	adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+	adapter_stats->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+	adapter_stats->rx_dropped_tcp_length =
+	    port_stats->rx_dropped_tcp_length;
+	adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors;
+	adapter_stats->rx_dropped_header_too_small =
+	    port_stats->rx_dropped_header_too_small;
+	adapter_stats->rx_input_fifo_overflow_drop =
+	    port_stats->rx_input_fifo_overflow_drop;
+	adapter_stats->rx_address_match_errors =
+	    port_stats->rx_address_match_errors;
+	adapter_stats->rx_alignment_symbol_errors =
+	    port_stats->rx_alignment_symbol_errors;
+	adapter_stats->rxpp_fifo_overflow_drop =
+	    port_stats->rxpp_fifo_overflow_drop;
+	adapter_stats->tx_pauseframes = port_stats->tx_pauseframes;
+	adapter_stats->tx_controlframes = port_stats->tx_controlframes;
+	adapter_stats->jabber_events = port_stats->jabber_events;
+
+	adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+	adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+	adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+	adapter_stats->rx_drops_invalid_ring =
+	    rxf_stats->rx_drops_invalid_ring;
+	adapter_stats->forwarded_packets = rxf_stats->forwarded_packets;
+	adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+	adapter_stats->rx_drops_no_tpre_descr =
+	    rxf_stats->rx_drops_no_tpre_descr;
+	adapter_stats->rx_drops_too_many_frags =
+	    rxf_stats->rx_drops_too_many_frags;
+
+	adapter_stats->eth_red_drops = pmem->eth_red_drops;
+
+	/* populate erx stats */
+	for (int i = 0; i < sc->nrqs; i++)
+		sc->rq[i]->rx_stats.rx_drops_no_frags =
+		    erx_stats->rx_drops_no_fragments[sc->rq[i]->rq_id];
+}
+
+
int
oce_stats_init(POCE_SOFTC sc)
{
-	int rc = 0, sz;
-
-	if (IS_BE(sc) || IS_SH(sc)) {
-		if (sc->flags & OCE_FLAGS_BE2)
-			sz = sizeof(struct mbx_get_nic_stats_v0);
-		else
-			sz = sizeof(struct mbx_get_nic_stats);
-	} else
+	int rc = 0, sz = 0;
+
+	if (IS_BE2(sc))
+		sz = sizeof(struct mbx_get_nic_stats_v0);
+	else if (IS_BE3(sc))
+		sz = sizeof(struct mbx_get_nic_stats_v1);
+	else if (IS_SH(sc))
+		sz = sizeof(struct mbx_get_nic_stats_v2);
+	else if (IS_XE201(sc))
 		sz = sizeof(struct mbx_get_pport_stats);

	rc = oce_dma_alloc(sc, sz, &sc->stats_mem, 0);

	return rc;
}

void
oce_stats_free(POCE_SOFTC sc)
{
	oce_dma_free(sc, &sc->stats_mem);
}

int
oce_refresh_nic_stats(POCE_SOFTC sc)
{
	int rc = 0, reset = 0;

-	if (IS_BE(sc) || IS_SH(sc)) {
-		if (sc->flags & OCE_FLAGS_BE2) {
-			rc = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
-			if (!rc)
-				copy_stats_to_sc_be2(sc);
-		} else {
-			rc = oce_mbox_get_nic_stats(sc, &sc->stats_mem);
-			if (!rc)
-				copy_stats_to_sc_be3(sc);
-		}
-
-	} else {
+	if (IS_BE2(sc)) {
+		rc = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
+		if (!rc)
+			copy_stats_to_sc_be2(sc);
+	} else if (IS_BE3(sc)) {
+		rc = oce_mbox_get_nic_stats_v1(sc, &sc->stats_mem);
+		if (!rc)
+			copy_stats_to_sc_be3(sc);
+	} else if (IS_SH(sc)) {
+		rc = oce_mbox_get_nic_stats_v2(sc, &sc->stats_mem);
+		if (!rc)
+			copy_stats_to_sc_sh(sc);
+	} else if (IS_XE201(sc)) {
 		rc = oce_mbox_get_pport_stats(sc, &sc->stats_mem, reset);
 		if (!rc)
 			copy_stats_to_sc_xe201(sc);
 	}
-
+
	return rc;
}

static int
oce_sysctl_sfp_vpd_dump(SYSCTL_HANDLER_ARGS)
{
	int result = 0, error;
	int rc = 0;
	POCE_SOFTC sc = (POCE_SOFTC) arg1;

	/* sysctl default handler */
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if(result == -1) {
		return EINVAL;
	}

	bzero((char *)sfp_vpd_dump_buffer, TRANSCEIVER_DATA_SIZE);

	rc = oce_mbox_read_transrecv_data(sc, PAGE_NUM_A0);
	if(rc)
		return rc;

	rc = oce_mbox_read_transrecv_data(sc, PAGE_NUM_A2);
	if(rc)
		return rc;

	return rc;
}

Index: stable/11/sys/dev/oce/oce_user.h
===================================================================
--- stable/11/sys/dev/oce/oce_user.h	(nonexistent)
+++ stable/11/sys/dev/oce/oce_user.h	(revision 338938)
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (C) 2013 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/* $FreeBSD$ */
+
+struct oce_mbx;
+struct oce_softc;
+struct mbx_hdr;
+
+enum oce_interrupt_mode {
+	OCE_INTERRUPT_MODE_MSIX = 0,
+	OCE_INTERRUPT_MODE_INTX = 1,
+	OCE_INTERRUPT_MODE_MSI  = 2,
+};
+
+#define MAX_ROCE_MSIX_VECTORS	16
+#define MIN_ROCE_MSIX_VECTORS	1
+#define ROCE_MSIX_VECTORS	2
+
+struct oce_dev_info {
+	device_t dev;
+	struct ifnet *ifp;
+	struct oce_softc *softc;
+
+	bus_space_handle_t db_bhandle;
+	bus_space_tag_t db_btag;
+	uint64_t unmapped_db;
+	uint32_t unmapped_db_len;
+	uint32_t db_page_size;
+	uint64_t dpp_unmapped_addr;
+	uint32_t dpp_unmapped_len;
+	uint8_t mac_addr[6];
+	uint32_t dev_family;
+	uint16_t vendor_id;
+	uint16_t dev_id;
+	enum oce_interrupt_mode intr_mode;
+	struct {
+		int num_vectors;
+		int start_vector;
+		uint32_t vector_list[MAX_ROCE_MSIX_VECTORS];
+	} msix;
+	uint32_t flags;
+#define OCE_RDMA_INFO_RDMA_SUPPORTED	0x00000001
+};
+
+
+#define OCE_GEN2_FAMILY	2
+
+#ifdef notdef
+struct oce_mbx_ctx {
+	struct oce_mbx *mbx;
+	void (*cb) (void *ctx);
+	void *cb_ctx;
+};
+#endif
+
+struct oce_mbx_ctx;
+
+typedef struct oce_rdma_info {
+	int size;
+	void (*close)(void);
+	int (*mbox_post)(struct oce_softc *sc,
+	    struct oce_mbx *mbx,
+	    struct oce_mbx_ctx *mbxctx);
+	void (*common_req_hdr_init)(struct mbx_hdr *hdr,
+	    uint8_t dom,
+	    uint8_t port,
+	    uint8_t subsys,
+	    uint8_t opcode,
+	    uint32_t timeout,
+	    uint32_t pyld_len,
+	    uint8_t version);
+	void (*get_mac_addr)(struct oce_softc *sc,
+	    uint8_t *macaddr);
+} OCE_RDMA_INFO, *POCE_RDMA_INFO;
+
+#define OCE_RDMA_INFO_SIZE	(sizeof(OCE_RDMA_INFO))
+
+typedef struct oce_rdma_if {
+	int size;
+	int (*announce)(struct oce_dev_info *devinfo);
+} OCE_RDMA_IF, *POCE_RDMA_IF;
+
+#define OCE_RDMA_IF_SIZE	(sizeof(OCE_RDMA_IF))
+
+int oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if);

Property changes on: stable/11/sys/dev/oce/oce_user.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: stable/11
===================================================================
--- stable/11	(revision 338937)
+++ stable/11	(revision 338938)

Property changes on: stable/11
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r306219
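Editor's note on the new interface: oce_user.h is the contract between
oce(4) and an optional RoCE companion module. Judging from the structure
contents, the module hands in an OCE_RDMA_IF carrying its announce()
callback and an empty OCE_RDMA_INFO that the driver fills with its own
services (mbox_post, common_req_hdr_init, get_mac_addr, close); the size
fields let the driver reject a caller built against a mismatched header.
A minimal registration sketch follows; the fill-in direction is inferred
from this header rather than confirmed by the diff, and every identifier
prefixed my_ is hypothetical. Assumes a kernel build environment providing
the types oce_user.h relies on (device_t, struct ifnet, bus_space_*).

	#include <sys/param.h>
	#include <sys/errno.h>
	#include "oce_user.h"

	/* Service table; callbacks presumed filled in by oce(4). */
	static OCE_RDMA_INFO my_rdma_info;

	static int
	my_announce(struct oce_dev_info *devinfo)
	{
		/* Presumably called once per oce(4) instance. */
		if ((devinfo->flags & OCE_RDMA_INFO_RDMA_SUPPORTED) == 0)
			return (0);	/* adapter lacks RDMA support */
		/*
		 * devinfo->msix.vector_list[] holds the MSI-X vectors
		 * set aside for RoCE (at most MAX_ROCE_MSIX_VECTORS).
		 */
		return (0);
	}

	static OCE_RDMA_IF my_rdma_if = {
		.size = OCE_RDMA_IF_SIZE,	/* validated by the driver */
		.announce = my_announce,
	};

	static int
	my_module_init(void)
	{
		my_rdma_info.size = OCE_RDMA_INFO_SIZE;
		if (oce_register_rdma(&my_rdma_info, &my_rdma_if) != 0)
			return (ENXIO);
		/* my_rdma_info.mbox_post() etc. are usable from here on. */
		return (0);
	}

Both sides exchange their structure sizes before any callback is used,
which is what allows the interface to evolve without silently corrupting
an out-of-date consumer.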