diff --git a/sys/dev/qlxgb/qla_hw.c b/sys/dev/qlxgb/qla_hw.c
index f96d85fba139..21d37c624e5a 100644
--- a/sys/dev/qlxgb/qla_hw.c
+++ b/sys/dev/qlxgb/qla_hw.c
@@ -1,1825 +1,1829 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011-2012 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File: qla_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

static uint32_t sysctl_num_rds_rings = 2;
static uint32_t sysctl_num_sds_rings = 4;

/*
 * Static Functions
 */

static void qla_init_cntxt_regions(qla_host_t *ha);
static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp);
static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size);
static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
    uint16_t cntxt_id, uint32_t add_multi);
static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_get_max_rds(qla_host_t *ha);
static int qla_get_max_sds(qla_host_t *ha);
static int qla_get_max_rules(qla_host_t *ha);
static int qla_get_max_rcv_cntxts(qla_host_t *ha);
static int qla_get_max_tx_cntxts(qla_host_t *ha);
static int qla_get_max_mtu(qla_host_t *ha);
static int qla_get_max_lro(qla_host_t *ha);
static int qla_get_flow_control(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha);

int
qla_get_msix_count(qla_host_t *ha)
{
        return (sysctl_num_sds_rings);
}

/*
 * Name: qla_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
qla_hw_add_sysctls(qla_host_t *ha)
{
        device_t dev;

        dev = ha->pci_dev;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "num_rds_rings", CTLFLAG_RD, &sysctl_num_rds_rings,
            sysctl_num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "num_sds_rings", CTLFLAG_RD, &sysctl_num_sds_rings,
            sysctl_num_sds_rings, "Number of
Status Descriptor Rings"); } /* * Name: qla_free_dma * Function: Frees the DMA'able memory allocated in qla_alloc_dma() */ void qla_free_dma(qla_host_t *ha) { uint32_t i; if (ha->hw.dma_buf.flags.context) { qla_free_dmabuf(ha, &ha->hw.dma_buf.context); ha->hw.dma_buf.flags.context = 0; } if (ha->hw.dma_buf.flags.sds_ring) { for (i = 0; i < ha->hw.num_sds_rings; i++) qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]); ha->hw.dma_buf.flags.sds_ring = 0; } if (ha->hw.dma_buf.flags.rds_ring) { for (i = 0; i < ha->hw.num_rds_rings; i++) qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]); ha->hw.dma_buf.flags.rds_ring = 0; } if (ha->hw.dma_buf.flags.tx_ring) { qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring); ha->hw.dma_buf.flags.tx_ring = 0; } } /* * Name: qla_alloc_dma * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. */ int qla_alloc_dma(qla_host_t *ha) { device_t dev; uint32_t i, j, size; dev = ha->pci_dev; QL_DPRINT2((dev, "%s: enter\n", __func__)); ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings; ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings; /* * Allocate Transmit Ring */ ha->hw.dma_buf.tx_ring.alignment = 8; ha->hw.dma_buf.tx_ring.size = (sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS; if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) { device_printf(dev, "%s: tx ring alloc failed\n", __func__); goto qla_alloc_dma_exit; } ha->hw.dma_buf.flags.tx_ring = 1; QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n", __func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr), ha->hw.dma_buf.tx_ring.dma_b)); /* * Allocate Receive Descriptor Rings */ for (i = 0; i < ha->hw.num_rds_rings; i++) { ha->hw.dma_buf.rds_ring[i].alignment = 8; if (i == RDS_RING_INDEX_NORMAL) { ha->hw.dma_buf.rds_ring[i].size = (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS; } else if (i == RDS_RING_INDEX_JUMBO) { ha->hw.dma_buf.rds_ring[i].size = (sizeof(q80_recv_desc_t)) * NUM_RX_JUMBO_DESCRIPTORS; } else break; if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) { QL_DPRINT4((dev, "%s: rds ring alloc failed\n", __func__)); for (j = 0; j < i; j++) qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[j]); goto qla_alloc_dma_exit; } QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n", __func__, i, (void *)(ha->hw.dma_buf.rds_ring[i].dma_addr), ha->hw.dma_buf.rds_ring[i].dma_b)); } ha->hw.dma_buf.flags.rds_ring = 1; /* * Allocate Status Descriptor Rings */ for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->hw.dma_buf.sds_ring[i].alignment = 8; ha->hw.dma_buf.sds_ring[i].size = (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS; if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) { device_printf(dev, "%s: sds ring alloc failed\n", __func__); for (j = 0; j < i; j++) qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[j]); goto qla_alloc_dma_exit; } QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n", __func__, i, (void *)(ha->hw.dma_buf.sds_ring[i].dma_addr), ha->hw.dma_buf.sds_ring[i].dma_b)); } ha->hw.dma_buf.flags.sds_ring = 1; /* * Allocate Context Area */ size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN); size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN); size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN); size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN); size += sizeof (uint32_t); /* for tx consumer index */ size = QL_ALIGN(size, PAGE_SIZE); ha->hw.dma_buf.context.alignment = 8; ha->hw.dma_buf.context.size = size; if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) { device_printf(dev, "%s: context alloc failed\n", __func__); goto qla_alloc_dma_exit; } 
ha->hw.dma_buf.flags.context = 1; QL_DPRINT2((dev, "%s: context phys %p virt %p\n", __func__, (void *)(ha->hw.dma_buf.context.dma_addr), ha->hw.dma_buf.context.dma_b)); qla_init_cntxt_regions(ha); return 0; qla_alloc_dma_exit: qla_free_dma(ha); return -1; } /* * Name: qla_init_cntxt_regions * Function: Initializes Tx/Rx Contexts. */ static void qla_init_cntxt_regions(qla_host_t *ha) { qla_hw_t *hw; q80_tx_cntxt_req_t *tx_cntxt_req; q80_rcv_cntxt_req_t *rx_cntxt_req; bus_addr_t phys_addr; uint32_t i; uint32_t size; hw = &ha->hw; hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b; for (i = 0; i < ha->hw.num_sds_rings; i++) hw->sds[i].sds_ring_base = (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b; phys_addr = hw->dma_buf.context.dma_addr; memset((void *)hw->dma_buf.context.dma_b, 0, ha->hw.dma_buf.context.size); hw->tx_cntxt_req = (q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b; hw->tx_cntxt_req_paddr = phys_addr; size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN); hw->tx_cntxt_rsp = (q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size); hw->tx_cntxt_rsp_paddr = hw->tx_cntxt_req_paddr + size; size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN); hw->rx_cntxt_req = (q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size); hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size; size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN); hw->rx_cntxt_rsp = (q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size); hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size; size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN); hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size); hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size; /* * Initialize the Transmit Context Request so that we don't need to * do it every time we need to create a context */ tx_cntxt_req = hw->tx_cntxt_req; tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr); tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr); tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW | CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO)); tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED); tx_cntxt_req->phys_addr = qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr); tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS); /* * Initialize the Receive Context Request */ rx_cntxt_req = hw->rx_cntxt_req; rx_cntxt_req->rx_req.rsp_dma_addr = qla_host_to_le64(hw->rx_cntxt_rsp_paddr); rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW | CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_JUMBO | CNTXT_CAP0_LRO| CNTXT_CAP0_HW_LRO); rx_cntxt_req->rx_req.intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED); rx_cntxt_req->rx_req.rds_intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE); rx_cntxt_req->rx_req.rds_ring_offset = 0; rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32( (hw->num_rds_rings * sizeof(q80_rq_rds_ring_t))); rx_cntxt_req->rx_req.num_rds_rings = qla_host_to_le16(hw->num_rds_rings); rx_cntxt_req->rx_req.num_sds_rings = qla_host_to_le16(hw->num_sds_rings); for (i = 0; i < hw->num_rds_rings; i++) { rx_cntxt_req->rds_req[i].phys_addr = qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); if (i == RDS_RING_INDEX_NORMAL) { rx_cntxt_req->rds_req[i].buf_size = qla_host_to_le64(MCLBYTES); rx_cntxt_req->rds_req[i].size = qla_host_to_le32(NUM_RX_DESCRIPTORS); } else { rx_cntxt_req->rds_req[i].buf_size = qla_host_to_le64(MJUM9BYTES); rx_cntxt_req->rds_req[i].size = qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS); } } for (i = 0; i < hw->num_sds_rings; 
i++) { rx_cntxt_req->sds_req[i].phys_addr = qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); rx_cntxt_req->sds_req[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i); } QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n", __func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr)); QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n", __func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr)); QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n", __func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr)); QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n", __func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr)); QL_DPRINT2((ha->pci_dev, "%s: tx_cons = %p paddr %p\n", __func__, hw->tx_cons, (void *)hw->tx_cons_paddr)); } /* * Name: qla_issue_cmd * Function: Issues commands on the CDRP interface and returns responses. */ static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp) { int ret = 0; uint32_t signature; uint32_t count = 400; /* 4 seconds or 400 10ms intervals */ uint32_t data; device_t dev; dev = ha->pci_dev; signature = 0xcafe0000 | 0x0100 | ha->pci_func; ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func); if (ret) { device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__); return (ret); } WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature); WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1)); WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2)); WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3)); WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd); while (count) { qla_mdelay(__func__, 10); data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP); if ((!(data & 0x80000000))) break; count--; } if ((!count) || (data != 1)) ret = -1; cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP); cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1); cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2); cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3); qla_sem_unlock(ha, Q8_SEM5_UNLOCK); if (ret) { device_printf(dev, "%s: " "cmd[0x%08x] = 0x%08x\n" "\tsig[0x%08x] = 0x%08x\n" "\targ1[0x%08x] = 0x%08x\n" "\targ2[0x%08x] = 0x%08x\n" "\targ3[0x%08x] = 0x%08x\n", __func__, Q8_NX_CDRP_CMD_RSP, cdrp->cmd, Q8_NX_CDRP_SIGNATURE, signature, Q8_NX_CDRP_ARG1, cdrp->cmd_arg1, Q8_NX_CDRP_ARG2, cdrp->cmd_arg2, Q8_NX_CDRP_ARG3, cdrp->cmd_arg3); device_printf(dev, "%s: exit (ret = 0x%x)\n" "\t\t rsp = 0x%08x\n" "\t\t arg1 = 0x%08x\n" "\t\t arg2 = 0x%08x\n" "\t\t arg3 = 0x%08x\n", __func__, ret, cdrp->rsp, cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3); } return (ret); } #define QLA_TX_MIN_FREE 2 /* * Name: qla_fw_cmd * Function: Issues firmware control commands on the Tx Ring. */ static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size) { device_t dev; q80_tx_cmd_t *tx_cmd; qla_hw_t *hw = &ha->hw; int count = 100; dev = ha->pci_dev; QLA_TX_LOCK(ha); if (hw->txr_free <= QLA_TX_MIN_FREE) { while (count--) { qla_hw_tx_done_locked(ha); if (hw->txr_free > QLA_TX_MIN_FREE) break; QLA_TX_UNLOCK(ha); qla_mdelay(__func__, 10); QLA_TX_LOCK(ha); } if (hw->txr_free <= QLA_TX_MIN_FREE) { QLA_TX_UNLOCK(ha); device_printf(dev, "%s: xmit queue full\n", __func__); return (-1); } } tx_cmd = &hw->tx_ring_base[hw->txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bcopy(fw_cmd, tx_cmd, size); hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); hw->txr_free--; QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next); QLA_TX_UNLOCK(ha); return (0); } /* * Name: qla_config_rss * Function: Configure RSS for the context/interface. 
*/ const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id) { qla_fw_cds_config_rss_t rss_config; int ret, i; bzero(&rss_config, sizeof(qla_fw_cds_config_rss_t)); rss_config.hdr.cmd = Q8_FWCD_CNTRL_REQ; rss_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_RSS; rss_config.hdr.cntxt_id = cntxt_id; rss_config.hash_type = (Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP | Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP); rss_config.flags = Q8_FWCD_RSS_FLAGS_ENABLE_RSS; rss_config.ind_tbl_mask = 0x7; for (i = 0; i < 5; i++) rss_config.rss_key[i] = rss_key[i]; ret = qla_fw_cmd(ha, &rss_config, sizeof(qla_fw_cds_config_rss_t)); return ret; } /* * Name: qla_config_intr_coalesce * Function: Configure Interrupt Coalescing. */ static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable) { qla_fw_cds_config_intr_coalesc_t intr_coalesce; int ret; bzero(&intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t)); intr_coalesce.hdr.cmd = Q8_FWCD_CNTRL_REQ; intr_coalesce.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING; intr_coalesce.hdr.cntxt_id = cntxt_id; intr_coalesce.flags = 0x04; intr_coalesce.max_rcv_pkts = 256; intr_coalesce.max_rcv_usecs = 3; intr_coalesce.max_snd_pkts = 64; intr_coalesce.max_snd_usecs = 4; if (tenable) { intr_coalesce.usecs_to = 1000; /* 1 millisecond */ intr_coalesce.timer_type = Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC; intr_coalesce.sds_ring_bitmask = Q8_FWCMD_INTR_COALESC_SDS_RING_0; } ret = qla_fw_cmd(ha, &intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t)); return ret; } /* * Name: qla_config_mac_addr * Function: binds a MAC address to the context/interface. * Can be unicast, multicast or broadcast. */ static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint16_t cntxt_id, uint32_t add_multi) { qla_fw_cds_config_mac_addr_t mac_config; int ret; // device_printf(ha->pci_dev, // "%s: mac_addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, // mac_addr[0], mac_addr[1], mac_addr[2], // mac_addr[3], mac_addr[4], mac_addr[5]); bzero(&mac_config, sizeof(qla_fw_cds_config_mac_addr_t)); mac_config.hdr.cmd = Q8_FWCD_CNTRL_REQ; mac_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_ADDR; mac_config.hdr.cntxt_id = cntxt_id; if (add_multi) mac_config.cmd = Q8_FWCD_ADD_MAC_ADDR; else mac_config.cmd = Q8_FWCD_DEL_MAC_ADDR; bcopy(mac_addr, mac_config.mac_addr,6); ret = qla_fw_cmd(ha, &mac_config, sizeof(qla_fw_cds_config_mac_addr_t)); return ret; } /* * Name: qla_set_mac_rcv_mode * Function: Enable/Disable AllMulticast and Promiscuous Modes. 
*/ static int qla_set_mac_rcv_mode(qla_host_t *ha, uint16_t cntxt_id, uint32_t mode) { qla_set_mac_rcv_mode_t rcv_mode; int ret; bzero(&rcv_mode, sizeof(qla_set_mac_rcv_mode_t)); rcv_mode.hdr.cmd = Q8_FWCD_CNTRL_REQ; rcv_mode.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE; rcv_mode.hdr.cntxt_id = cntxt_id; rcv_mode.mode = mode; ret = qla_fw_cmd(ha, &rcv_mode, sizeof(qla_set_mac_rcv_mode_t)); return ret; } void qla_set_promisc(qla_host_t *ha) { (void)qla_set_mac_rcv_mode(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, Q8_MAC_RCV_ENABLE_PROMISCUOUS); } void qla_set_allmulti(qla_host_t *ha) { (void)qla_set_mac_rcv_mode(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, Q8_MAC_RCV_ENABLE_ALLMULTI); } void qla_reset_promisc_allmulti(qla_host_t *ha) { (void)qla_set_mac_rcv_mode(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, Q8_MAC_RCV_RESET_PROMISC_ALLMULTI); } /* * Name: qla_config_ipv4_addr * Function: Configures the Destination IP Addr for LRO. */ void qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr) { qla_config_ipv4_t ip_conf; bzero(&ip_conf, sizeof(qla_config_ipv4_t)); ip_conf.hdr.cmd = Q8_FWCD_CNTRL_REQ; ip_conf.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_IPADDR; ip_conf.hdr.cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id; ip_conf.cmd = (uint64_t)Q8_CONFIG_CMD_IP_ENABLE; ip_conf.ipv4_addr = (uint64_t)ipv4_addr; (void)qla_fw_cmd(ha, &ip_conf, sizeof(qla_config_ipv4_t)); return; } /* * Name: qla_tx_tso * Function: Checks if the packet to be transmitted is a candidate for * Large TCP Segment Offload. If yes, the appropriate fields in the Tx * Ring Structure are plugged in. */ static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) { struct ether_vlan_header *eh; struct ip *ip = NULL; struct tcphdr *th = NULL; uint32_t ehdrlen, hdrlen = 0, ip_hlen, tcp_hlen, tcp_opt_off; uint16_t etype, opcode, offload = 1; uint8_t *tcp_opt; device_t dev; dev = ha->pci_dev; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } switch (etype) { case ETHERTYPE_IP: tcp_opt_off = ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr); if (mp->m_len < tcp_opt_off) { m_copydata(mp, 0, tcp_opt_off, hdr); ip = (struct ip *)hdr; } else { ip = (struct ip *)(mp->m_data + ehdrlen); } ip_hlen = ip->ip_hl << 2; opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; if ((ip->ip_p != IPPROTO_TCP) || (ip_hlen != sizeof (struct ip))) { offload = 0; } else { th = (struct tcphdr *)((caddr_t)ip + ip_hlen); } break; default: QL_DPRINT8((dev, "%s: type!=ip\n", __func__)); offload = 0; break; } if (!offload) return (-1); tcp_hlen = th->th_off << 2; hdrlen = ehdrlen + ip_hlen + tcp_hlen; if (mp->m_len < hdrlen) { if (mp->m_len < tcp_opt_off) { if (tcp_hlen > sizeof(struct tcphdr)) { m_copydata(mp, tcp_opt_off, (tcp_hlen - sizeof(struct tcphdr)), &hdr[tcp_opt_off]); } } else { m_copydata(mp, 0, hdrlen, hdr); } } if ((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) { /* If TCP options are preset only time stamp option is supported */ if ((tcp_hlen - sizeof(struct tcphdr)) != 10) return -1; else { if (mp->m_len < hdrlen) { tcp_opt = &hdr[tcp_opt_off]; } else { tcp_opt = (uint8_t *)(mp->m_data + tcp_opt_off); } if ((*tcp_opt != 0x01) || (*(tcp_opt + 1) != 0x01) || (*(tcp_opt + 2) != 0x08) || (*(tcp_opt + 3) != 10)) { return -1; } } tx_cmd->mss = ha->max_frame_size - ETHER_CRC_LEN - hdrlen; } else { tx_cmd->mss = mp->m_pkthdr.tso_segsz; } 
tx_cmd->flags_opcode = opcode ; tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; tx_cmd->ip_hdr_off = ehdrlen; tx_cmd->mss = mp->m_pkthdr.tso_segsz; tx_cmd->total_hdr_len = hdrlen; /* Check for Multicast least significant bit of MSB == 1 */ if (eh->evl_dhost[0] & 0x01) { tx_cmd->flags_opcode = Q8_TX_CMD_FLAGS_MULTICAST; } if (mp->m_len < hdrlen) { return (1); } return (0); } /* * Name: qla_tx_chksum * Function: Checks if the packet to be transmitted is a candidate for * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx * Ring Structure are plugged in. */ static int qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd) { struct ether_vlan_header *eh; struct ip *ip; struct ip6_hdr *ip6; uint32_t ehdrlen, ip_hlen; uint16_t etype, opcode, offload = 1; device_t dev; dev = ha->pci_dev; if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0) return (-1); eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = sizeof (struct ip); if (mp->m_len < (ehdrlen + ip_hlen)) { device_printf(dev, "%s: ipv4 mlen\n", __func__); offload = 0; break; } if (ip->ip_p == IPPROTO_TCP) opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM; else if (ip->ip_p == IPPROTO_UDP) opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM; else { device_printf(dev, "%s: ipv4\n", __func__); offload = 0; } break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); if (mp->m_len < (ehdrlen + ip_hlen)) { device_printf(dev, "%s: ipv6 mlen\n", __func__); offload = 0; break; } if (ip6->ip6_nxt == IPPROTO_TCP) opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6; else if (ip6->ip6_nxt == IPPROTO_UDP) opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6; else { device_printf(dev, "%s: ipv6\n", __func__); offload = 0; } break; default: offload = 0; break; } if (!offload) return (-1); tx_cmd->flags_opcode = opcode; tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; return (0); } /* * Name: qla_hw_send * Function: Transmits a packet. It first checks if the packet is a * candidate for Large TCP Segment Offload and then for UDP/TCP checksum * offload. If either of these creteria are not met, it is transmitted * as a regular ethernet frame. 
*/ int qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, uint32_t *tx_idx, struct mbuf *mp) { struct ether_vlan_header *eh; qla_hw_t *hw = &ha->hw; q80_tx_cmd_t *tx_cmd, tso_cmd; bus_dma_segment_t *c_seg; uint32_t num_tx_cmds, hdr_len = 0; uint32_t total_length = 0, bytes, tx_cmd_count = 0; device_t dev; int i, ret; uint8_t *src = NULL, *dst = NULL; dev = ha->pci_dev; /* * Always make sure there is atleast one empty slot in the tx_ring * tx_ring is considered full when there only one entry available */ num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; total_length = mp->m_pkthdr.len; if (total_length > QLA_MAX_TSO_FRAME_SIZE) { device_printf(dev, "%s: total length exceeds maxlen(%d)\n", __func__, total_length); return (-1); } eh = mtod(mp, struct ether_vlan_header *); if ((mp->m_pkthdr.len > ha->max_frame_size)||(nsegs > Q8_TX_MAX_SEGMENTS)) { bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t)); src = ha->hw.frame_hdr; ret = qla_tx_tso(ha, mp, &tso_cmd, src); if (!(ret & ~1)) { /* find the additional tx_cmd descriptors required */ hdr_len = tso_cmd.total_hdr_len; bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; bytes = QL_MIN(bytes, hdr_len); num_tx_cmds++; hdr_len -= bytes; while (hdr_len) { bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); hdr_len -= bytes; num_tx_cmds++; } hdr_len = tso_cmd.total_hdr_len; if (ret == 0) src = (uint8_t *)eh; } } if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { qla_hw_tx_done_locked(ha); if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { QL_DPRINT8((dev, "%s: (hw->txr_free <= " "(num_tx_cmds + QLA_TX_MIN_FREE))\n", __func__)); return (-1); } } *tx_idx = hw->txr_next; tx_cmd = &hw->tx_ring_base[hw->txr_next]; if (hdr_len == 0) { if ((nsegs > Q8_TX_MAX_SEGMENTS) || (mp->m_pkthdr.len > ha->max_frame_size)){ device_printf(dev, "%s: (nsegs[%d, %d, 0x%b] > Q8_TX_MAX_SEGMENTS)\n", __func__, nsegs, mp->m_pkthdr.len, (int)mp->m_pkthdr.csum_flags, CSUM_BITS); qla_dump_buf8(ha, "qla_hw_send: wrong pkt", mtod(mp, char *), mp->m_len); return (EINVAL); } bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); if (qla_tx_chksum(ha, mp, tx_cmd) != 0) tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER; } else { bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t)); } if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED; else if (mp->m_flags & M_VLANTAG) { tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED | Q8_TX_CMD_FLAGS_HW_VLAN_ID); tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag; } tx_cmd->n_bufs = (uint8_t)nsegs; tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF); tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8))); tx_cmd->port_cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func); c_seg = segs; while (1) { for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) { switch (i) { case 0: tx_cmd->buf1_addr = c_seg->ds_addr; tx_cmd->buf1_len = c_seg->ds_len; break; case 1: tx_cmd->buf2_addr = c_seg->ds_addr; tx_cmd->buf2_len = c_seg->ds_len; break; case 2: tx_cmd->buf3_addr = c_seg->ds_addr; tx_cmd->buf3_len = c_seg->ds_len; break; case 3: tx_cmd->buf4_addr = c_seg->ds_addr; tx_cmd->buf4_len = c_seg->ds_len; break; } c_seg++; nsegs--; } hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; if (!nsegs) break; tx_cmd = &hw->tx_ring_base[hw->txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); } if (hdr_len) { /* TSO : Copy the header in the following tx cmd descriptors */ tx_cmd = &hw->tx_ring_base[hw->txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bytes = 
sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; bytes = QL_MIN(bytes, hdr_len); dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN; if (mp->m_flags & M_VLANTAG) { /* first copy the src/dst MAC addresses */ bcopy(src, dst, (ETHER_ADDR_LEN * 2)); dst += (ETHER_ADDR_LEN * 2); src += (ETHER_ADDR_LEN * 2); hdr_len -= (ETHER_ADDR_LEN * 2); *((uint16_t *)dst) = htons(ETHERTYPE_VLAN); dst += 2; *((uint16_t *)dst) = mp->m_pkthdr.ether_vtag; dst += 2; bytes -= ((ETHER_ADDR_LEN * 2) + 4); bcopy(src, dst, bytes); src += bytes; hdr_len -= bytes; } else { bcopy(src, dst, bytes); src += bytes; hdr_len -= bytes; } hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; while (hdr_len) { tx_cmd = &hw->tx_ring_base[hw->txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); bcopy(src, tx_cmd, bytes); src += bytes; hdr_len -= bytes; hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; } } hw->txr_free = hw->txr_free - tx_cmd_count; QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next); QL_DPRINT8((dev, "%s: return\n", __func__)); return (0); } /* * Name: qla_del_hw_if * Function: Destroys the hardware specific entities corresponding to an * Ethernet Interface */ void qla_del_hw_if(qla_host_t *ha) { int i; for (i = 0; i < ha->hw.num_sds_rings; i++) QL_DISABLE_INTERRUPTS(ha, i); qla_del_rcv_cntxt(ha); qla_del_xmt_cntxt(ha); ha->hw.flags.lro = 0; } /* * Name: qla_init_hw_if * Function: Creates the hardware specific entities corresponding to an * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address * corresponding to the interface. Enables LRO if allowed. */ int qla_init_hw_if(qla_host_t *ha) { int i; uint8_t bcast_mac[6]; qla_get_hw_caps(ha); for (i = 0; i < ha->hw.num_sds_rings; i++) { bzero(ha->hw.dma_buf.sds_ring[i].dma_b, ha->hw.dma_buf.sds_ring[i].size); } /* * Create Receive Context */ if (qla_init_rcv_cntxt(ha)) { return (-1); } ha->hw.rx_next = NUM_RX_DESCRIPTORS - 2; ha->hw.rxj_next = NUM_RX_JUMBO_DESCRIPTORS - 2; ha->hw.rx_in = ha->hw.rxj_in = 0; /* Update the RDS Producer Indices */ QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next); QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next); /* * Create Transmit Context */ if (qla_init_xmt_cntxt(ha)) { qla_del_rcv_cntxt(ha); return (-1); } qla_config_mac_addr(ha, ha->hw.mac_addr, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1); bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; qla_config_mac_addr(ha, bcast_mac, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1); qla_config_rss(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id); qla_config_intr_coalesce(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 0); for (i = 0; i < ha->hw.num_sds_rings; i++) QL_ENABLE_INTERRUPTS(ha, i); return (0); } /* * Name: qla_init_rcv_cntxt * Function: Creates the Receive Context. 
*/ static int qla_init_rcv_cntxt(qla_host_t *ha) { device_t dev; qla_cdrp_t cdrp; q80_rcv_cntxt_rsp_t *rsp; q80_stat_desc_t *sdesc; bus_addr_t phys_addr; int i, j; qla_hw_t *hw = &ha->hw; dev = ha->pci_dev; /* * Create Receive Context */ for (i = 0; i < hw->num_sds_rings; i++) { sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { sdesc->data[0] = Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW); } } phys_addr = ha->hw.rx_cntxt_req_paddr; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_CREATE_RX_CNTXT; cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32); cdrp.cmd_arg2 = (uint32_t)(phys_addr); cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_rcv_cntxt_req_t)); if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_CREATE_RX_CNTXT failed\n", __func__); return (-1); } else { rsp = ha->hw.rx_cntxt_rsp; QL_DPRINT2((dev, "%s: rcv cntxt successful" " rds_ring_offset = 0x%08x" " sds_ring_offset = 0x%08x" " cntxt_state = 0x%08x" " funcs_per_port = 0x%08x" " num_rds_rings = 0x%04x" " num_sds_rings = 0x%04x" " cntxt_id = 0x%04x" " phys_port = 0x%02x" " virt_port = 0x%02x\n", __func__, rsp->rx_rsp.rds_ring_offset, rsp->rx_rsp.sds_ring_offset, rsp->rx_rsp.cntxt_state, rsp->rx_rsp.funcs_per_port, rsp->rx_rsp.num_rds_rings, rsp->rx_rsp.num_sds_rings, rsp->rx_rsp.cntxt_id, rsp->rx_rsp.phys_port, rsp->rx_rsp.virt_port)); for (i = 0; i < ha->hw.num_rds_rings; i++) { QL_DPRINT2((dev, "%s: rcv cntxt rds[%i].producer_reg = 0x%08x\n", __func__, i, rsp->rds_rsp[i].producer_reg)); } for (i = 0; i < ha->hw.num_sds_rings; i++) { QL_DPRINT2((dev, "%s: rcv cntxt sds[%i].consumer_reg = 0x%08x" " sds[%i].intr_mask_reg = 0x%08x\n", __func__, i, rsp->sds_rsp[i].consumer_reg, i, rsp->sds_rsp[i].intr_mask_reg)); } } ha->hw.flags.init_rx_cnxt = 1; return (0); } /* * Name: qla_del_rcv_cntxt * Function: Destroys the Receive Context. */ void qla_del_rcv_cntxt(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev = ha->pci_dev; if (!ha->hw.flags.init_rx_cnxt) return; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_DESTROY_RX_CNTXT; cdrp.cmd_arg1 = (uint32_t) (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_DESTROY_RX_CNTXT failed\n", __func__); } ha->hw.flags.init_rx_cnxt = 0; } /* * Name: qla_init_xmt_cntxt * Function: Creates the Transmit Context. */ static int qla_init_xmt_cntxt(qla_host_t *ha) { bus_addr_t phys_addr; device_t dev; q80_tx_cntxt_rsp_t *tx_rsp; qla_cdrp_t cdrp; qla_hw_t *hw = &ha->hw; dev = ha->pci_dev; /* * Create Transmit Context */ phys_addr = ha->hw.tx_cntxt_req_paddr; tx_rsp = ha->hw.tx_cntxt_rsp; hw->txr_comp = hw->txr_next = 0; *(hw->tx_cons) = 0; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_CREATE_TX_CNTXT; cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32); cdrp.cmd_arg2 = (uint32_t)(phys_addr); cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_tx_cntxt_req_t)); if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_CREATE_TX_CNTXT failed\n", __func__); return (-1); } else { ha->hw.tx_prod_reg = tx_rsp->producer_reg; QL_DPRINT2((dev, "%s: tx cntxt successful" " cntxt_state = 0x%08x " " cntxt_id = 0x%04x " " phys_port_id = 0x%02x " " virt_port_id = 0x%02x " " producer_reg = 0x%08x " " intr_mask_reg = 0x%08x\n", __func__, tx_rsp->cntxt_state, tx_rsp->cntxt_id, tx_rsp->phys_port_id, tx_rsp->virt_port_id, tx_rsp->producer_reg, tx_rsp->intr_mask_reg)); } ha->hw.txr_free = NUM_TX_DESCRIPTORS; ha->hw.flags.init_tx_cnxt = 1; return (0); } /* * Name: qla_del_xmt_cntxt * Function: Destroys the Transmit Context. 
*/ static void qla_del_xmt_cntxt(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev = ha->pci_dev; if (!ha->hw.flags.init_tx_cnxt) return; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_DESTROY_TX_CNTXT; cdrp.cmd_arg1 = (uint32_t) (ha->hw.tx_cntxt_rsp)->cntxt_id; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_DESTROY_TX_CNTXT failed\n", __func__); } ha->hw.flags.init_tx_cnxt = 0; } /* * Name: qla_get_max_rds * Function: Returns the maximum number of Receive Descriptor Rings per context. */ static int qla_get_max_rds(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n", __func__); return (-1); } else { ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1; QL_DPRINT2((dev, "%s: max_rds_per_context 0x%08x\n", __func__, ha->hw.max_rds_per_cntxt)); } return 0; } /* * Name: qla_get_max_sds * Function: Returns the maximum number of Status Descriptor Rings per context. */ static int qla_get_max_sds(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_RD_MAX_SDS_PER_CNTXT; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n", __func__); return (-1); } else { ha->hw.max_sds_per_cntxt = cdrp.rsp_arg1; QL_DPRINT2((dev, "%s: max_sds_per_context 0x%08x\n", __func__, ha->hw.max_sds_per_cntxt)); } return 0; } /* * Name: qla_get_max_rules * Function: Returns the maximum number of Rules per context. */ static int qla_get_max_rules(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_RD_MAX_RULES_PER_CNTXT; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_RD_MAX_RULES_PER_CNTXT failed\n", __func__); return (-1); } else { ha->hw.max_rules_per_cntxt = cdrp.rsp_arg1; QL_DPRINT2((dev, "%s: max_rules_per_cntxt 0x%08x\n", __func__, ha->hw.max_rules_per_cntxt)); } return 0; } /* * Name: qla_get_max_rcv_cntxts * Function: Returns the maximum number of Receive Contexts supported. */ static int qla_get_max_rcv_cntxts(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_RD_MAX_RX_CNTXT; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_RD_MAX_RX_CNTXT failed\n", __func__); return (-1); } else { ha->hw.max_rcv_cntxts = cdrp.rsp_arg1; QL_DPRINT2((dev, "%s: max_rcv_cntxts 0x%08x\n", __func__, ha->hw.max_rcv_cntxts)); } return 0; } /* * Name: qla_get_max_tx_cntxts * Function: Returns the maximum number of Transmit Contexts supported. */ static int qla_get_max_tx_cntxts(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_RD_MAX_TX_CNTXT; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_RD_MAX_TX_CNTXT failed\n", __func__); return (-1); } else { ha->hw.max_xmt_cntxts = cdrp.rsp_arg1; QL_DPRINT2((dev, "%s: max_xmt_cntxts 0x%08x\n", __func__, ha->hw.max_xmt_cntxts)); } return 0; } /* * Name: qla_get_max_mtu * Function: Returns the MTU supported for a context. 
*/ static int qla_get_max_mtu(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_RD_MAX_MTU; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__); return (-1); } else { ha->hw.max_mtu = cdrp.rsp_arg1; QL_DPRINT2((dev, "%s: max_mtu 0x%08x\n", __func__, ha->hw.max_mtu)); } return 0; } /* * Name: qla_set_max_mtu * Function: * Sets the maximum transfer unit size for the specified rcv context. */ int qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_SET_MTU; cdrp.cmd_arg1 = (uint32_t)cntxt_id; cdrp.cmd_arg2 = mtu; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__); return (-1); } else { ha->hw.max_mtu = cdrp.rsp_arg1; } return 0; } /* * Name: qla_get_max_lro * Function: Returns the maximum number of TCP Connection which can be supported * with LRO. */ static int qla_get_max_lro(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_RD_MAX_LRO; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_RD_MAX_LRO failed\n", __func__); return (-1); } else { ha->hw.max_lro = cdrp.rsp_arg1; QL_DPRINT2((dev, "%s: max_lro 0x%08x\n", __func__, ha->hw.max_lro)); } return 0; } /* * Name: qla_get_flow_control * Function: Returns the Receive/Transmit Flow Control (PAUSE) settings for * PCI function. */ static int qla_get_flow_control(qla_host_t *ha) { qla_cdrp_t cdrp; device_t dev; dev = ha->pci_dev; bzero(&cdrp, sizeof(qla_cdrp_t)); cdrp.cmd = Q8_CMD_GET_FLOW_CNTRL; if (qla_issue_cmd(ha, &cdrp)) { device_printf(dev, "%s: Q8_CMD_GET_FLOW_CNTRL failed\n", __func__); return (-1); } else { QL_DPRINT2((dev, "%s: flow control 0x%08x\n", __func__, cdrp.rsp_arg1)); } return 0; } /* * Name: qla_get_flow_control * Function: Retrieves hardware capabilities */ void qla_get_hw_caps(qla_host_t *ha) { //qla_read_mac_addr(ha); qla_get_max_rds(ha); qla_get_max_sds(ha); qla_get_max_rules(ha); qla_get_max_rcv_cntxts(ha); qla_get_max_tx_cntxts(ha); qla_get_max_mtu(ha); qla_get_max_lro(ha); qla_get_flow_control(ha); return; } /* * Name: qla_hw_set_multi * Function: Sets the Multicast Addresses provided the host O.S into the * hardware (for the given interface) */ void qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt, uint32_t add_multi) { q80_rcv_cntxt_rsp_t *rsp; int i; rsp = ha->hw.rx_cntxt_rsp; for (i = 0; i < mcnt; i++) { qla_config_mac_addr(ha, mta, rsp->rx_rsp.cntxt_id, add_multi); mta += Q8_MAC_ADDR_LEN; } return; } /* * Name: qla_hw_tx_done_locked * Function: Handle Transmit Completions */ static void qla_hw_tx_done_locked(qla_host_t *ha) { qla_tx_buf_t *txb; qla_hw_t *hw = &ha->hw; uint32_t comp_idx, comp_count = 0; /* retrieve index of last entry in tx ring completed */ comp_idx = qla_le32_to_host(*(hw->tx_cons)); while (comp_idx != hw->txr_comp) { txb = &ha->tx_buf[hw->txr_comp]; hw->txr_comp++; if (hw->txr_comp == NUM_TX_DESCRIPTORS) hw->txr_comp = 0; comp_count++; if (txb->m_head) { bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, txb->map); bus_dmamap_destroy(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->map = (bus_dmamap_t)0; txb->m_head = NULL; } } hw->txr_free += comp_count; QL_DPRINT8((ha->pci_dev, "%s: return [c,f, p, pn][%d, %d, %d, %d]\n", __func__, hw->txr_comp, hw->txr_free, hw->txr_next, READ_REG32(ha, 
(ha->hw.tx_prod_reg + 0x1b2000)))); return; } /* * Name: qla_hw_tx_done * Function: Handle Transmit Completions */ void qla_hw_tx_done(qla_host_t *ha) { if (!mtx_trylock(&ha->tx_lock)) { QL_DPRINT8((ha->pci_dev, "%s: !mtx_trylock(&ha->tx_lock)\n", __func__)); return; } qla_hw_tx_done_locked(ha); if (ha->hw.txr_free > free_pkt_thres) ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; mtx_unlock(&ha->tx_lock); return; } void qla_update_link_state(qla_host_t *ha) { uint32_t link_state; uint32_t prev_link_state; if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) { ha->hw.flags.link_up = 0; return; } link_state = READ_REG32(ha, Q8_LINK_STATE); prev_link_state = ha->hw.flags.link_up; if (ha->pci_func == 0) ha->hw.flags.link_up = (((link_state & 0xF) == 1)? 1 : 0); else ha->hw.flags.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0); if (prev_link_state != ha->hw.flags.link_up) { if (ha->hw.flags.link_up) { if_link_state_change(ha->ifp, LINK_STATE_UP); } else { if_link_state_change(ha->ifp, LINK_STATE_DOWN); } } } int qla_config_lro(qla_host_t *ha) { +#if defined(INET) || defined(INET6) int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; if (tcp_lro_init(lro)) { device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n", __func__); return (-1); } lro->ifp = ha->ifp; } ha->flags.lro_init = 1; QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__)); +#endif return (0); } void qla_free_lro(qla_host_t *ha) { +#if defined(INET) || defined(INET6) int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; if (!ha->flags.lro_init) return; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; tcp_lro_free(lro); } ha->flags.lro_init = 0; +#endif } void qla_hw_stop_rcv(qla_host_t *ha) { int i, done, count = 100; while (count--) { done = 1; for (i = 0; i < ha->hw.num_sds_rings; i++) { if (ha->hw.sds[i].rcv_active) done = 0; } if (done) break; else qla_mdelay(__func__, 10); } } diff --git a/sys/dev/qlxgb/qla_isr.c b/sys/dev/qlxgb/qla_isr.c index 4c030f600bcf..d507ecaf01e1 100644 --- a/sys/dev/qlxgb/qla_isr.c +++ b/sys/dev/qlxgb/qla_isr.c @@ -1,407 +1,412 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011-2013 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * File: qla_isr.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #include __FBSDID("$FreeBSD$"); #include "qla_os.h" #include "qla_reg.h" #include "qla_hw.h" #include "qla_def.h" #include "qla_inline.h" #include "qla_ver.h" #include "qla_glbl.h" #include "qla_dbg.h" static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp); static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp); /* * Name: qla_rx_intr * Function: Handles normal ethernet frames received */ static void qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx, struct lro_ctrl *lro) { uint32_t idx, length, status, ring; qla_rx_buf_t *rxb; struct mbuf *mp; struct ifnet *ifp = ha->ifp; qla_sds_t *sdsp; struct ether_vlan_header *eh; sdsp = &ha->hw.sds[sds_idx]; ring = (uint32_t)Q8_STAT_DESC_TYPE(data); idx = (uint32_t)Q8_STAT_DESC_HANDLE(data); length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data); status = (uint32_t)Q8_STAT_DESC_STATUS(data); if (ring == 0) { if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) { device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]" " len[0x%08x] invalid\n", __func__, ring, idx, length); return; } } else { if ((idx >= NUM_RX_JUMBO_DESCRIPTORS)||(length > MJUM9BYTES)) { device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]" " len[0x%08x] invalid\n", __func__, ring, idx, length); return; } } if (ring == 0) rxb = &ha->rx_buf[idx]; else rxb = &ha->rx_jbuf[idx]; QL_ASSERT((rxb != NULL),\ ("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",\ __func__, ring, idx, sds_idx)); mp = rxb->m_head; QL_ASSERT((mp != NULL),\ ("%s: [r,i,rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",\ __func__, ring, idx, rxb, sds_idx)); bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD); if (ring == 0) { rxb->m_head = NULL; rxb->next = sdsp->rxb_free; sdsp->rxb_free = rxb; sdsp->rx_free++; } else { rxb->m_head = NULL; rxb->next = sdsp->rxjb_free; sdsp->rxjb_free = rxb; sdsp->rxj_free++; } mp->m_len = length; mp->m_pkthdr.len = length; mp->m_pkthdr.rcvif = ifp; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { uint32_t *data = (uint32_t *)eh; mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag); mp->m_flags |= M_VLANTAG; *(data + 3) = *(data + 2); *(data + 2) = *(data + 1); *(data + 1) = *data; m_adj(mp, ETHER_VLAN_ENCAP_LEN); } if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) { mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); } else { mp->m_pkthdr.csum_flags = 0; } +#if defined(INET) || defined(INET6) if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) { /* LRO packet has been successfully queued */ - } else { + } else +#endif + { (*ifp->if_input)(ifp, mp); } if (sdsp->rx_free > std_replenish) qla_replenish_normal_rx(ha, sdsp); if (sdsp->rxj_free > jumbo_replenish) qla_replenish_jumbo_rx(ha, sdsp); return; } static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp) { qla_rx_buf_t *rxb; int count = jumbo_replenish; uint32_t rxj_next; if (!mtx_trylock(&ha->rxj_lock)) return; rxj_next = ha->hw.rxj_next; while (count--) { rxb = sdsp->rxjb_free; if (rxb == NULL) break; sdsp->rxjb_free = rxb->next; sdsp->rxj_free--; if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_JUMBO) == 0) { qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, ha->hw.rxj_in, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); ha->hw.rxj_in++; if (ha->hw.rxj_in == NUM_RX_JUMBO_DESCRIPTORS) ha->hw.rxj_in = 0; ha->hw.rxj_next++; if (ha->hw.rxj_next == NUM_RX_JUMBO_DESCRIPTORS) ha->hw.rxj_next = 0; } else { device_printf(ha->pci_dev, "%s: 
qla_get_mbuf [1,(%d),(%d)] failed\n", __func__, ha->hw.rxj_in, rxb->handle); rxb->m_head = NULL; rxb->next = sdsp->rxjb_free; sdsp->rxjb_free = rxb; sdsp->rxj_free++; break; } } if (rxj_next != ha->hw.rxj_next) { QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next); } mtx_unlock(&ha->rxj_lock); } static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp) { qla_rx_buf_t *rxb; int count = std_replenish; uint32_t rx_next; if (!mtx_trylock(&ha->rx_lock)) return; rx_next = ha->hw.rx_next; while (count--) { rxb = sdsp->rxb_free; if (rxb == NULL) break; sdsp->rxb_free = rxb->next; sdsp->rx_free--; if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_NORMAL) == 0) { qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, ha->hw.rx_in, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); ha->hw.rx_in++; if (ha->hw.rx_in == NUM_RX_DESCRIPTORS) ha->hw.rx_in = 0; ha->hw.rx_next++; if (ha->hw.rx_next == NUM_RX_DESCRIPTORS) ha->hw.rx_next = 0; } else { device_printf(ha->pci_dev, "%s: qla_get_mbuf [0,(%d),(%d)] failed\n", __func__, ha->hw.rx_in, rxb->handle); rxb->m_head = NULL; rxb->next = sdsp->rxb_free; sdsp->rxb_free = rxb; sdsp->rx_free++; break; } } if (rx_next != ha->hw.rx_next) { QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next); } mtx_unlock(&ha->rx_lock); } /* * Name: qla_isr * Function: Main Interrupt Service Routine */ static uint32_t qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count) { device_t dev; qla_hw_t *hw; uint32_t comp_idx, desc_count; q80_stat_desc_t *sdesc; struct lro_ctrl *lro; uint32_t ret = 0; dev = ha->pci_dev; hw = &ha->hw; hw->sds[sds_idx].rcv_active = 1; if (ha->flags.stop_rcv) { hw->sds[sds_idx].rcv_active = 0; return 0; } QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx)); /* * receive interrupts */ comp_idx = hw->sds[sds_idx].sdsr_next; lro = &hw->sds[sds_idx].lro; while (count--) { sdesc = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[comp_idx]; if (Q8_STAT_DESC_OWNER((sdesc->data[0])) != Q8_STAT_DESC_OWNER_HOST) { QL_DPRINT2((dev, "%s: data %p sdsr_next 0x%08x\n", __func__, (void *)sdesc->data[0], comp_idx)); break; } desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0])); switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) { case Q8_STAT_DESC_OPCODE_RCV_PKT: case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD: qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro); break; default: device_printf(dev, "%s: default 0x%llx!\n", __func__, (long long unsigned int)sdesc->data[0]); break; } while (desc_count--) { sdesc->data[0] = Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW); comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1); sdesc = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[comp_idx]; } } +#if defined(INET) || defined(INET6) tcp_lro_flush_all(lro); +#endif if (hw->sds[sds_idx].sdsr_next != comp_idx) { QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx); } hw->sds[sds_idx].sdsr_next = comp_idx; sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx]; if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) == Q8_STAT_DESC_OWNER_HOST)) { ret = -1; } hw->sds[sds_idx].rcv_active = 0; return (ret); } void qla_isr(void *arg) { qla_ivec_t *ivec = arg; qla_host_t *ha; uint32_t sds_idx; uint32_t ret; ha = ivec->ha; sds_idx = ivec->irq_rid - 1; if (sds_idx >= ha->hw.num_sds_rings) { device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n", __func__, sds_idx); return; } if (sds_idx == 0) taskqueue_enqueue(ha->tx_tq, &ha->tx_task); ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres); if (sds_idx == 0) taskqueue_enqueue(ha->tx_tq, &ha->tx_task); if (ret) { 
taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq, &ha->irq_vec[sds_idx].rcv_task); } else { QL_ENABLE_INTERRUPTS(ha, sds_idx); } } void qla_rcv(void *context, int pending) { qla_ivec_t *ivec = context; qla_host_t *ha; qla_hw_t *hw; uint32_t sds_idx; uint32_t ret; struct ifnet *ifp; ha = ivec->ha; hw = &ha->hw; sds_idx = ivec->irq_rid - 1; ifp = ha->ifp; do { if (sds_idx == 0) { if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) { taskqueue_enqueue(ha->tx_tq, &ha->tx_task); } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) { taskqueue_enqueue(ha->tx_tq, &ha->tx_task); } } ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d); } while (ret); if (sds_idx == 0) taskqueue_enqueue(ha->tx_tq, &ha->tx_task); QL_ENABLE_INTERRUPTS(ha, sds_idx); } diff --git a/sys/dev/qlxgb/qla_os.c b/sys/dev/qlxgb/qla_os.c index a52527476837..bd56eb458c70 100644 --- a/sys/dev/qlxgb/qla_os.c +++ b/sys/dev/qlxgb/qla_os.c @@ -1,1462 +1,1466 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011-2013 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: qla_os.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
*/ #include __FBSDID("$FreeBSD$"); #include "qla_os.h" #include "qla_reg.h" #include "qla_hw.h" #include "qla_def.h" #include "qla_inline.h" #include "qla_ver.h" #include "qla_glbl.h" #include "qla_dbg.h" /* * Some PCI Configuration Space Related Defines */ #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP8020 #define PCI_PRODUCT_QLOGIC_ISP8020 0x8020 #endif #define PCI_QLOGIC_ISP8020 \ ((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC) /* * static functions */ static int qla_alloc_parent_dma_tag(qla_host_t *ha); static void qla_free_parent_dma_tag(qla_host_t *ha); static int qla_alloc_xmt_bufs(qla_host_t *ha); static void qla_free_xmt_bufs(qla_host_t *ha); static int qla_alloc_rcv_bufs(qla_host_t *ha); static void qla_free_rcv_bufs(qla_host_t *ha); static void qla_init_ifnet(device_t dev, qla_host_t *ha); static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS); static void qla_release(qla_host_t *ha); static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void qla_stop(qla_host_t *ha); static int qla_send(qla_host_t *ha, struct mbuf **m_headp); static void qla_tx_done(void *context, int pending); /* * Hooks to the Operating Systems */ static int qla_pci_probe (device_t); static int qla_pci_attach (device_t); static int qla_pci_detach (device_t); static void qla_init(void *arg); static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int qla_media_change(struct ifnet *ifp); static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); static device_method_t qla_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qla_pci_probe), DEVMETHOD(device_attach, qla_pci_attach), DEVMETHOD(device_detach, qla_pci_detach), { 0, 0 } }; static driver_t qla_pci_driver = { "ql", qla_pci_methods, sizeof (qla_host_t), }; static devclass_t qla80xx_devclass; DRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0); MODULE_DEPEND(qla80xx, pci, 1, 1, 1); MODULE_DEPEND(qla80xx, ether, 1, 1, 1); MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver"); uint32_t std_replenish = 8; uint32_t jumbo_replenish = 2; uint32_t rcv_pkt_thres = 128; uint32_t rcv_pkt_thres_d = 32; uint32_t snd_pkt_thres = 16; uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2); static char dev_str[64]; /* * Name: qla_pci_probe * Function: Validate the PCI device to be a QLA80XX device */ static int qla_pci_probe(device_t dev) { switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_ISP8020: snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", "Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); device_set_desc(dev, dev_str); break; default: return (ENXIO); } if (bootverbose) printf("%s: %s\n ", __func__, dev_str); return (BUS_PROBE_DEFAULT); } static void qla_add_sysctls(qla_host_t *ha) { device_t dev = ha->pci_dev; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_get_stats, "I", "Statistics"); SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fw_version", CTLFLAG_RD, ha->fw_ver_str, 0, "firmware version"); dbg_level = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &dbg_level, dbg_level, "Debug Level"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "std_replenish", CTLFLAG_RW, &std_replenish, std_replenish, "Threshold for Replenishing Standard Frames"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "jumbo_replenish", CTLFLAG_RW, &jumbo_replenish, jumbo_replenish, "Threshold for Replenishing Jumbo Frames"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW, &rcv_pkt_thres, rcv_pkt_thres, "Threshold for # of rcv pkts to trigger indication isr"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW, &rcv_pkt_thres_d, rcv_pkt_thres_d, "Threshold for # of rcv pkts to trigger indication defered"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "snd_pkt_thres", CTLFLAG_RW, &snd_pkt_thres, snd_pkt_thres, "Threshold for # of snd packets"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "free_pkt_thres", CTLFLAG_RW, &free_pkt_thres, free_pkt_thres, "Threshold for # of packets to free at a time"); return; } static void qla_watchdog(void *arg) { qla_host_t *ha = arg; qla_hw_t *hw; struct ifnet *ifp; hw = &ha->hw; ifp = ha->ifp; if (ha->flags.qla_watchdog_exit) return; if (!ha->flags.qla_watchdog_pause) { if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) { taskqueue_enqueue(ha->tx_tq, &ha->tx_task); } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) { taskqueue_enqueue(ha->tx_tq, &ha->tx_task); } } ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000; callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); } /* * Name: qla_pci_attach * Function: attaches the device to the operating system */ static int qla_pci_attach(device_t dev) { qla_host_t *ha = NULL; uint32_t rsrc_len, i; QL_DPRINT2((dev, "%s: enter\n", __func__)); if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(ha, 0, sizeof (qla_host_t)); if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) { device_printf(dev, "device is not ISP8020\n"); return (ENXIO); } ha->pci_func = pci_get_function(dev); ha->pci_dev = dev; pci_enable_busmaster(dev); ha->reg_rid = PCIR_BAR(0); ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, RF_ACTIVE); if (ha->pci_reg == NULL) { device_printf(dev, "unable to map any ports\n"); goto qla_pci_attach_err; } rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, ha->reg_rid); mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF); ha->flags.lock_init = 1; ha->msix_count = pci_msix_count(dev); if (ha->msix_count < qla_get_msix_count(ha)) { device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, ha->msix_count); goto qla_pci_attach_err; } QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x" " msix_count 0x%x pci_reg %p\n", __func__, ha, ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg)); ha->msix_count = qla_get_msix_count(ha); if (pci_alloc_msix(dev, &ha->msix_count)) { device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, ha->msix_count); ha->msix_count = 0; goto qla_pci_attach_err; } TASK_INIT(&ha->tx_task, 
0, qla_tx_done, ha); ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT, taskqueue_thread_enqueue, &ha->tx_tq); taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq", device_get_nameunit(ha->pci_dev)); for (i = 0; i < ha->msix_count; i++) { ha->irq_vec[i].irq_rid = i+1; ha->irq_vec[i].ha = ha; ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->irq_vec[i].irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->irq_vec[i].irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto qla_pci_attach_err; } if (bus_setup_intr(dev, ha->irq_vec[i].irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, qla_isr, &ha->irq_vec[i], &ha->irq_vec[i].handle)) { device_printf(dev, "could not setup interrupt\n"); goto qla_pci_attach_err; } TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,\ &ha->irq_vec[i]); ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq", M_NOWAIT, taskqueue_thread_enqueue, &ha->irq_vec[i].rcv_tq); taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET, "%s rcvq", device_get_nameunit(ha->pci_dev)); } qla_add_sysctls(ha); /* add hardware specific sysctls */ qla_hw_add_sysctls(ha); /* initialize hardware */ if (qla_init_hw(ha)) { device_printf(dev, "%s: qla_init_hw failed\n", __func__); goto qla_pci_attach_err; } device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__, ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, ha->fw_ver_build); snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d", ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, ha->fw_ver_build); //qla_get_hw_caps(ha); qla_read_mac_addr(ha); /* allocate parent dma tag */ if (qla_alloc_parent_dma_tag(ha)) { device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n", __func__); goto qla_pci_attach_err; } /* alloc all dma buffers */ if (qla_alloc_dma(ha)) { device_printf(dev, "%s: qla_alloc_dma failed\n", __func__); goto qla_pci_attach_err; } /* create the o.s ethernet interface */ qla_init_ifnet(dev, ha); ha->flags.qla_watchdog_active = 1; ha->flags.qla_watchdog_pause = 1; callout_init(&ha->tx_callout, 1); /* create ioctl device interface */ if (qla_make_cdev(ha)) { device_printf(dev, "%s: qla_make_cdev failed\n", __func__); goto qla_pci_attach_err; } callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); QL_DPRINT2((dev, "%s: exit 0\n", __func__)); return (0); qla_pci_attach_err: qla_release(ha); QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__)); return (ENXIO); } /* * Name: qla_pci_detach * Function: Unhooks the device from the operating system */ static int qla_pci_detach(device_t dev) { qla_host_t *ha = NULL; int i; QL_DPRINT2((dev, "%s: enter\n", __func__)); if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } QLA_LOCK(ha, __func__); qla_stop(ha); QLA_UNLOCK(ha, __func__); if (ha->tx_tq) { taskqueue_drain(ha->tx_tq, &ha->tx_task); taskqueue_free(ha->tx_tq); } for (i = 0; i < ha->msix_count; i++) { taskqueue_drain(ha->irq_vec[i].rcv_tq, &ha->irq_vec[i].rcv_task); taskqueue_free(ha->irq_vec[i].rcv_tq); } qla_release(ha); QL_DPRINT2((dev, "%s: exit\n", __func__)); return (0); } /* * SYSCTL Related Callbacks */ static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err) return (err); ha = (qla_host_t *)arg1; //qla_get_stats(ha); QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret)); return (err); } /* * Name: qla_release * Function: Releases the resources allocated for the device */ static void qla_release(qla_host_t *ha) { device_t 
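/*
 * Editorial sketch (not part of the patch): the attach loop above allocates
 * one SYS_RES_IRQ per MSI-X vector (rid = vector + 1), hooks qla_isr with
 * INTR_TYPE_NET | INTR_MPSAFE, and pairs each vector with a fast taskqueue
 * for deferred receive work. Simplified, with hypothetical "example_" names
 * and abbreviated error handling.
 */
struct example_vec {
	struct resource		*irq;
	void			*handle;
	int			rid;
	struct task		task;
	struct taskqueue	*tq;
};

static int
example_setup_vector(device_t dev, struct example_vec *v, int idx,
    driver_intr_t *isr, task_fn_t *fn, void *arg)
{
	v->rid = idx + 1;	/* MSI-X interrupt rids are 1-based */
	v->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &v->rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (v->irq == NULL)
		return (ENXIO);
	if (bus_setup_intr(dev, v->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, isr, arg, &v->handle) != 0)
		return (ENXIO);
	TASK_INIT(&v->task, 0, fn, arg);
	v->tq = taskqueue_create_fast("example_rcvq", M_NOWAIT,
	    taskqueue_thread_enqueue, &v->tq);
	taskqueue_start_threads(&v->tq, 1, PI_NET, "%s rcvq",
	    device_get_nameunit(dev));
	return (0);
}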
dev; int i; dev = ha->pci_dev; qla_del_cdev(ha); if (ha->flags.qla_watchdog_active) ha->flags.qla_watchdog_exit = 1; callout_stop(&ha->tx_callout); qla_mdelay(__func__, 100); if (ha->ifp != NULL) ether_ifdetach(ha->ifp); qla_free_dma(ha); qla_free_parent_dma_tag(ha); for (i = 0; i < ha->msix_count; i++) { if (ha->irq_vec[i].handle) (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, ha->irq_vec[i].handle); if (ha->irq_vec[i].irq) (void) bus_release_resource(dev, SYS_RES_IRQ, ha->irq_vec[i].irq_rid, ha->irq_vec[i].irq); } if (ha->msix_count) pci_release_msi(dev); if (ha->flags.lock_init) { mtx_destroy(&ha->tx_lock); mtx_destroy(&ha->rx_lock); mtx_destroy(&ha->rxj_lock); mtx_destroy(&ha->hw_lock); } if (ha->pci_reg) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, ha->pci_reg); } /* * DMA Related Functions */ static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { *((bus_addr_t *)arg) = 0; if (error) { printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); return; } QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs)); *((bus_addr_t *)arg) = segs[0].ds_addr; return; } int qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { int ret = 0; device_t dev; bus_addr_t b_addr; dev = ha->pci_dev; QL_DPRINT2((dev, "%s: enter\n", __func__)); ret = bus_dma_tag_create( ha->parent_tag,/* parent */ dma_buf->alignment, ((bus_size_t)(1ULL << 32)),/* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_buf->size, /* maxsize */ 1, /* nsegments */ dma_buf->size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma_buf->dma_tag); if (ret) { device_printf(dev, "%s: could not create dma tag\n", __func__); goto qla_alloc_dmabuf_exit; } ret = bus_dmamem_alloc(dma_buf->dma_tag, (void **)&dma_buf->dma_b, (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), &dma_buf->dma_map); if (ret) { bus_dma_tag_destroy(dma_buf->dma_tag); device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); goto qla_alloc_dmabuf_exit; } ret = bus_dmamap_load(dma_buf->dma_tag, dma_buf->dma_map, dma_buf->dma_b, dma_buf->size, qla_dmamap_callback, &b_addr, BUS_DMA_NOWAIT); if (ret || !b_addr) { bus_dma_tag_destroy(dma_buf->dma_tag); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); ret = -1; goto qla_alloc_dmabuf_exit; } dma_buf->dma_addr = b_addr; qla_alloc_dmabuf_exit: QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", __func__, ret, (void *)dma_buf->dma_tag, (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, dma_buf->size)); return ret; } void qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); bus_dma_tag_destroy(dma_buf->dma_tag); } static int qla_alloc_parent_dma_tag(qla_host_t *ha) { int ret; device_t dev; dev = ha->pci_dev; /* * Allocate parent DMA Tag */ ret = bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ha->parent_tag); if (ret) { device_printf(dev, "%s: could not create parent dma tag\n", __func__); return (-1); } ha->flags.parent_tag = 1; return (0); } static void qla_free_parent_dma_tag(qla_host_t *ha) 
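/*
 * Editorial sketch (not part of the patch): qla_alloc_dmabuf() follows the
 * standard busdma sequence -- create a tag, allocate coherent memory, then
 * load the map with a callback that records the single segment's bus
 * address. Minimal version with hypothetical "example_" names; the unwind
 * of the tag/memory on failure is abbreviated here.
 */
static void
example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = arg;

	*paddr = (error == 0 && nsegs == 1) ? segs[0].ds_addr : 0;
}

static int
example_alloc_coherent(bus_dma_tag_t parent, bus_size_t size,
    bus_dma_tag_t *tag, bus_dmamap_t *map, void **vaddr, bus_addr_t *paddr)
{
	*paddr = 0;
	if (bus_dma_tag_create(parent, 8, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, tag))
		return (ENOMEM);
	if (bus_dmamem_alloc(*tag, vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT, map))
		return (ENOMEM);
	if (bus_dmamap_load(*tag, *map, *vaddr, size, example_dmamap_cb,
	    paddr, BUS_DMA_NOWAIT) || *paddr == 0)
		return (ENOMEM);
	return (0);
}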
{ if (ha->flags.parent_tag) { bus_dma_tag_destroy(ha->parent_tag); ha->flags.parent_tag = 0; } } /* * Name: qla_init_ifnet * Function: Creates the Network Device Interface and Registers it with the O.S */ static void qla_init_ifnet(device_t dev, qla_host_t *ha) { struct ifnet *ifp; QL_DPRINT2((dev, "%s: enter\n", __func__)); ifp = ha->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_baudrate = IF_Gbps(10); ifp->if_init = qla_init; ifp->if_softc = ha; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = qla_ioctl; ifp->if_start = qla_start; IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); IFQ_SET_READY(&ifp->if_snd); ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ether_ifattach(ifp, qla_get_mac_addr(ha)); ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU; ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_LINKSTATE; #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) ifp->if_timer = 0; ifp->if_watchdog = NULL; #endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */ ifp->if_capenable = ifp->if_capabilities; ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status); ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); QL_DPRINT2((dev, "%s: exit\n", __func__)); return; } static void qla_init_locked(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; qla_stop(ha); if (qla_alloc_xmt_bufs(ha) != 0) return; if (qla_alloc_rcv_bufs(ha) != 0) return; if (qla_config_lro(ha)) return; bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN); ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; ha->flags.stop_rcv = 0; if (qla_init_hw_if(ha) == 0) { ifp = ha->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ha->flags.qla_watchdog_pause = 0; } return; } static void qla_init(void *arg) { qla_host_t *ha; ha = (qla_host_t *)arg; QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); QLA_LOCK(ha, __func__); qla_init_locked(ha); QLA_UNLOCK(ha, __func__); QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); } static u_int qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) { uint8_t *mta = arg; if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) return (0); bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); return (1); } static void qla_set_multi(qla_host_t *ha, uint32_t add_multi) { uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; struct ifnet *ifp = ha->ifp; int mcnt; mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta); qla_hw_set_multi(ha, mta, mcnt, add_multi); return; } static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { int ret = 0; struct ifreq *ifr = (struct ifreq *)data; +#ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; +#endif qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; switch (cmd) { case SIOCSIFADDR: QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", __func__, cmd)); +#ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { QLA_LOCK(ha, __func__); qla_init_locked(ha); QLA_UNLOCK(ha, __func__); } QL_DPRINT4((ha->pci_dev, 
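/*
 * Editorial sketch (not part of the patch): qla_set_multi() below walks the
 * interface's link-level multicast list with if_foreach_llmaddr(), copying
 * each address into a flat array until the hardware limit is reached. The
 * callback returns 1 for each address it consumed, so the return value of
 * if_foreach_llmaddr() is the count. EXAMPLE_MAX_MCAST and the "example_"
 * names are hypothetical stand-ins for the driver's Q8_* constants.
 */
#define EXAMPLE_MAX_MCAST	64

static u_int
example_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint8_t *mta = arg;

	if (cnt == EXAMPLE_MAX_MCAST)
		return (0);		/* table full: skip the rest */
	bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
	return (1);
}

/* usage: mcnt = if_foreach_llmaddr(ifp, example_copy_maddr, mta); */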
"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr))); arp_ifinit(ifp, ifa); if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) { qla_config_ipv4_addr(ha, (IA_SIN(ifa)->sin_addr.s_addr)); } - } else { - ether_ioctl(ifp, cmd, data); + break; } +#endif + ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", __func__, cmd)); if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) { ret = EINVAL; } else { QLA_LOCK(ha, __func__); ifp->if_mtu = ifr->ifr_mtu; ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { ret = qla_set_max_mtu(ha, ha->max_frame_size, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id); } QLA_UNLOCK(ha, __func__); if (ret) ret = EINVAL; } break; case SIOCSIFFLAGS: QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", __func__, cmd)); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ ha->if_flags) & IFF_PROMISC) { qla_set_promisc(ha); } else if ((ifp->if_flags ^ ha->if_flags) & IFF_ALLMULTI) { qla_set_allmulti(ha); } } else { QLA_LOCK(ha, __func__); qla_init_locked(ha); ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ret = qla_set_max_mtu(ha, ha->max_frame_size, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id); QLA_UNLOCK(ha, __func__); } } else { QLA_LOCK(ha, __func__); if (ifp->if_drv_flags & IFF_DRV_RUNNING) qla_stop(ha); ha->if_flags = ifp->if_flags; QLA_UNLOCK(ha, __func__); } break; case SIOCADDMULTI: QL_DPRINT4((ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { qla_set_multi(ha, 1); } break; case SIOCDELMULTI: QL_DPRINT4((ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { qla_set_multi(ha, 0); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", __func__, cmd)); ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", __func__, cmd)); if (mask & IFCAP_HWCSUM) ifp->if_capenable ^= IFCAP_HWCSUM; if (mask & IFCAP_TSO4) ifp->if_capenable ^= IFCAP_TSO4; if (mask & IFCAP_TSO6) ifp->if_capenable ^= IFCAP_TSO6; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) qla_init(ha); VLAN_CAPABILITIES(ifp); break; } default: QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n", __func__, cmd)); ret = ether_ioctl(ifp, cmd, data); break; } return (ret); } static int qla_media_change(struct ifnet *ifp) { qla_host_t *ha; struct ifmedia *ifm; int ret = 0; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); ifm = &ha->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) ret = EINVAL; QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; qla_update_link_state(ha); if (ha->hw.flags.link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha)); } QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\ (ha->hw.flags.link_up ? 
"link_up" : "link_down"))); return; } void qla_start(struct ifnet *ifp) { struct mbuf *m_head; qla_host_t *ha = (qla_host_t *)ifp->if_softc; QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); if (!mtx_trylock(&ha->tx_lock)) { QL_DPRINT8((ha->pci_dev, "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__)); return; } if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); QLA_TX_UNLOCK(ha); return; } if (!ha->watchdog_ticks) qla_update_link_state(ha); if (!ha->hw.flags.link_up) { QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__)); QLA_TX_UNLOCK(ha); return; } while (ifp->if_snd.ifq_head != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n", __func__)); break; } if (qla_send(ha, &m_head)) { if (m_head == NULL) break; QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__)); ifp->if_drv_flags |= IFF_DRV_OACTIVE; IF_PREPEND(&ifp->if_snd, m_head); break; } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); } QLA_TX_UNLOCK(ha); QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); return; } static int qla_send(qla_host_t *ha, struct mbuf **m_headp) { bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; bus_dmamap_t map; int nsegs; int ret = -1; uint32_t tx_idx; struct mbuf *m_head = *m_headp; QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) { ha->err_tx_dmamap_create++; device_printf(ha->pci_dev, "%s: bus_dmamap_create failed[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); return (ret); } ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (ret == EFBIG) { struct mbuf *m; QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__, m_head->m_pkthdr.len)); m = m_defrag(m_head, M_NOWAIT); if (m == NULL) { ha->err_tx_defrag++; m_freem(m_head); *m_headp = NULL; device_printf(ha->pci_dev, "%s: m_defrag() = NULL [%d]\n", __func__, ret); return (ENOBUFS); } m_head = m; if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT))) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); bus_dmamap_destroy(ha->tx_tag, map); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } } else if (ret) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); bus_dmamap_destroy(ha->tx_tag, map); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } QL_ASSERT((nsegs != 0), ("qla_send: empty packet")); bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) { ha->tx_buf[tx_idx].m_head = m_head; ha->tx_buf[tx_idx].map = map; } else { if (ret == EINVAL) { m_freem(m_head); *m_headp = NULL; } } QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static void qla_stop(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; ha->flags.qla_watchdog_pause = 1; qla_mdelay(__func__, 100); ha->flags.stop_rcv = 1; qla_hw_stop_rcv(ha); qla_del_hw_if(ha); qla_free_lro(ha); qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); return; } /* * Buffer Management Functions for Transmit and Receive Rings */ static int qla_alloc_xmt_bufs(qla_host_t *ha) { if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ 
BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ QLA_MAX_SEGMENTS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->tx_tag)) { device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", __func__); return (ENOMEM); } bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); return 0; } /* * Release mbuf after it sent on the wire */ static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) { QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); if (txb->m_head) { bus_dmamap_unload(ha->tx_tag, txb->map); bus_dmamap_destroy(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; } QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); } static void qla_free_xmt_bufs(qla_host_t *ha) { int i; for (i = 0; i < NUM_TX_DESCRIPTORS; i++) qla_clear_tx_buf(ha, &ha->tx_buf[i]); if (ha->tx_tag != NULL) { bus_dma_tag_destroy(ha->tx_tag); ha->tx_tag = NULL; } bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); return; } static int qla_alloc_rcv_bufs(qla_host_t *ha) { int i, j, ret = 0; qla_rx_buf_t *rxb; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->rx_tag)) { device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", __func__); return (ENOMEM); } bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); bzero((void *)ha->rx_jbuf, (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS)); for (i = 0; i < MAX_SDS_RINGS; i++) { ha->hw.sds[i].sdsr_next = 0; ha->hw.sds[i].rxb_free = NULL; ha->hw.sds[i].rx_free = 0; ha->hw.sds[i].rxjb_free = NULL; ha->hw.sds[i].rxj_free = 0; } for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &ha->rx_buf[i]; ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); if (ret) { device_printf(ha->pci_dev, "%s: dmamap[%d] failed\n", __func__, i); for (j = 0; j < i; j++) { bus_dmamap_destroy(ha->rx_tag, ha->rx_buf[j].map); } goto qla_alloc_rcv_bufs_failed; } } qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL); for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &ha->rx_buf[i]; rxb->handle = i; if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) { /* * set the physical address in the corresponding * descriptor entry in the receive ring/queue for the * hba */ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); } else { device_printf(ha->pci_dev, "%s: qla_get_mbuf [standard(%d)] failed\n", __func__, i); bus_dmamap_destroy(ha->rx_tag, rxb->map); goto qla_alloc_rcv_bufs_failed; } } for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) { rxb = &ha->rx_jbuf[i]; ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); if (ret) { device_printf(ha->pci_dev, "%s: dmamap[%d] failed\n", __func__, i); for (j = 0; j < i; j++) { bus_dmamap_destroy(ha->rx_tag, ha->rx_jbuf[j].map); } goto qla_alloc_rcv_bufs_failed; } } qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO); for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) { rxb = &ha->rx_jbuf[i]; rxb->handle = i; if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) { /* * set the physical address in the corresponding * descriptor entry in the receive ring/queue for the * hba */ qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, 
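/*
 * Editorial sketch (not part of the patch): qla_alloc_rcv_bufs() above
 * creates one DMA map per receive descriptor and, on a mid-loop failure,
 * destroys the maps already created (the j < i unwind) before jumping to
 * the common failure path. Hypothetical "example_" names.
 */
static int
example_create_rx_maps(bus_dma_tag_t tag, bus_dmamap_t *maps, int n)
{
	int i, j, err;

	for (i = 0; i < n; i++) {
		err = bus_dmamap_create(tag, BUS_DMA_NOWAIT, &maps[i]);
		if (err != 0) {
			for (j = 0; j < i; j++)
				bus_dmamap_destroy(tag, maps[j]);
			return (err);
		}
	}
	return (0);
}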
i, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); } else { device_printf(ha->pci_dev, "%s: qla_get_mbuf [jumbo(%d)] failed\n", __func__, i); bus_dmamap_destroy(ha->rx_tag, rxb->map); goto qla_alloc_rcv_bufs_failed; } } return (0); qla_alloc_rcv_bufs_failed: qla_free_rcv_bufs(ha); return (ret); } static void qla_free_rcv_bufs(qla_host_t *ha) { int i; qla_rx_buf_t *rxb; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &ha->rx_buf[i]; if (rxb->m_head != NULL) { bus_dmamap_unload(ha->rx_tag, rxb->map); bus_dmamap_destroy(ha->rx_tag, rxb->map); m_freem(rxb->m_head); rxb->m_head = NULL; } } for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) { rxb = &ha->rx_jbuf[i]; if (rxb->m_head != NULL) { bus_dmamap_unload(ha->rx_tag, rxb->map); bus_dmamap_destroy(ha->rx_tag, rxb->map); m_freem(rxb->m_head); rxb->m_head = NULL; } } if (ha->rx_tag != NULL) { bus_dma_tag_destroy(ha->rx_tag); ha->rx_tag = NULL; } bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); bzero((void *)ha->rx_jbuf, (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS)); for (i = 0; i < MAX_SDS_RINGS; i++) { ha->hw.sds[i].sdsr_next = 0; ha->hw.sds[i].rxb_free = NULL; ha->hw.sds[i].rx_free = 0; ha->hw.sds[i].rxjb_free = NULL; ha->hw.sds[i].rxj_free = 0; } return; } int qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp, uint32_t jumbo) { struct mbuf *mp = nmp; int ret = 0; uint32_t offset; QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo)); if (mp == NULL) { if (!jumbo) { mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mp == NULL) { ha->err_m_getcl++; ret = ENOBUFS; device_printf(ha->pci_dev, "%s: m_getcl failed\n", __func__); goto exit_qla_get_mbuf; } mp->m_len = mp->m_pkthdr.len = MCLBYTES; } else { mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (mp == NULL) { ha->err_m_getjcl++; ret = ENOBUFS; device_printf(ha->pci_dev, "%s: m_getjcl failed\n", __func__); goto exit_qla_get_mbuf; } mp->m_len = mp->m_pkthdr.len = MJUM9BYTES; } } else { if (!jumbo) mp->m_len = mp->m_pkthdr.len = MCLBYTES; else mp->m_len = mp->m_pkthdr.len = MJUM9BYTES; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); if (offset) { offset = 8 - offset; m_adj(mp, offset); } /* * Using memory from the mbuf cluster pool, invoke the bus_dma * machinery to arrange the memory mapping. */ ret = bus_dmamap_load(ha->rx_tag, rxb->map, mtod(mp, void *), mp->m_len, qla_dmamap_callback, &rxb->paddr, BUS_DMA_NOWAIT); if (ret || !rxb->paddr) { m_free(mp); rxb->m_head = NULL; device_printf(ha->pci_dev, "%s: bus_dmamap_load failed\n", __func__); ret = -1; goto exit_qla_get_mbuf; } rxb->m_head = mp; bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); exit_qla_get_mbuf: QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); return (ret); } static void qla_tx_done(void *context, int pending) { qla_host_t *ha = context; qla_hw_tx_done(ha); qla_start(ha->ifp); } diff --git a/sys/dev/qlxgb/qla_os.h b/sys/dev/qlxgb/qla_os.h index 1ee4f8d1ce61..fddae501b8b3 100644 --- a/sys/dev/qlxgb/qla_os.h +++ b/sys/dev/qlxgb/qla_os.h @@ -1,178 +1,180 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011-2013 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
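/*
 * Editorial sketch (not part of the patch): qla_get_mbuf() above forces the
 * receive buffer's data pointer onto an 8-byte boundary before the DMA load.
 * It computes the low three address bits and trims (8 - offset) bytes with
 * m_adj(), shortening the buffer slightly but keeping the start of the
 * cluster DMA-friendly. Hypothetical "example_" helper.
 */
static void
example_align_mbuf8(struct mbuf *mp)
{
	int off;

	off = (int)((uintptr_t)mp->m_data & 0x7);
	if (off != 0)
		m_adj(mp, 8 - off);	/* drop the unaligned leading bytes */
}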
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: qla_os.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #ifndef _QLA_OS_H_ #define _QLA_OS_H_ +#include "opt_inet.h" + #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 700112 #error FreeBSD Version not supported - use version >= 700112 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define QLA_USEC_DELAY(usec) DELAY(usec) static __inline int qla_ms_to_hz(int ms) { int qla_hz; struct timeval t; t.tv_sec = ms / 1000; t.tv_usec = (ms % 1000) * 1000; qla_hz = tvtohz(&t); if (qla_hz < 0) qla_hz = 0x7fffffff; if (!qla_hz) qla_hz = 1; return (qla_hz); } static __inline int qla_sec_to_hz(int sec) { struct timeval t; t.tv_sec = sec; t.tv_usec = 0; return (tvtohz(&t)); } #define qla_host_to_le16(x) htole16(x) #define qla_host_to_le32(x) htole32(x) #define qla_host_to_le64(x) htole64(x) #define qla_host_to_be16(x) htobe16(x) #define qla_host_to_be32(x) htobe32(x) #define qla_host_to_be64(x) htobe64(x) #define qla_le16_to_host(x) le16toh(x) #define qla_le32_to_host(x) le32toh(x) #define qla_le64_to_host(x) le64toh(x) #define qla_be16_to_host(x) be16toh(x) #define qla_be32_to_host(x) be32toh(x) #define qla_be64_to_host(x) be64toh(x) MALLOC_DECLARE(M_QLA8XXXBUF); #define qla_mdelay(fn, msecs) \ {\ if (cold) \ DELAY((msecs * 1000)); \ else \ pause(fn, qla_ms_to_hz(msecs)); \ } /* * Locks */ #define QLA_LOCK(ha, str) qla_lock(ha, str); #define QLA_UNLOCK(ha, str) qla_unlock(ha, str) #define QLA_TX_LOCK(ha) mtx_lock(&ha->tx_lock); #define QLA_TX_UNLOCK(ha) mtx_unlock(&ha->tx_lock); #define QLA_RX_LOCK(ha) mtx_lock(&ha->rx_lock); #define QLA_RX_UNLOCK(ha) mtx_unlock(&ha->rx_lock); #define QLA_RXJ_LOCK(ha) mtx_lock(&ha->rxj_lock); #define QLA_RXJ_UNLOCK(ha) mtx_unlock(&ha->rxj_lock); /* * structure encapsulating a DMA buffer */ struct qla_dma { bus_size_t alignment; uint32_t size; void *dma_b; bus_addr_t dma_addr; bus_dmamap_t dma_map; bus_dma_tag_t dma_tag; }; typedef struct qla_dma qla_dma_t; #define QL_ASSERT(x, y) if (!x) panic y #endif /* #ifndef _QLA_OS_H_ */ diff --git a/sys/dev/qlxgbe/ql_hw.c b/sys/dev/qlxgbe/ql_hw.c index 816e86a2c94c..340fc779b10c 100644 --- a/sys/dev/qlxgbe/ql_hw.c +++ b/sys/dev/qlxgbe/ql_hw.c @@ -1,5584 +1,5590 @@ /*- * 
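/*
 * Editorial sketch (not part of the patch): qla_ms_to_hz() in this header
 * converts milliseconds to callout ticks by filling a struct timeval and
 * handing it to tvtohz(), clamping the result to at least one tick so a
 * short sleep is never skipped. Hypothetical "example_" helper and usage.
 */
static int
example_ms_to_ticks(int ms)
{
	struct timeval tv;
	int t;

	tv.tv_sec = ms / 1000;
	tv.tv_usec = (ms % 1000) * 1000;
	t = tvtohz(&tv);
	return (t > 0 ? t : 1);
}

/* usage: pause("example", example_ms_to_ticks(100)); */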
SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_hw.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. * Content: Contains Hardware dependent functions */ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_ver.h" #include "ql_glbl.h" #include "ql_dbg.h" #include "ql_minidump.h" /* * Static Functions */ static void qla_del_rcv_cntxt(qla_host_t *ha); static int qla_init_rcv_cntxt(qla_host_t *ha); static int qla_del_xmt_cntxt(qla_host_t *ha); static int qla_init_xmt_cntxt(qla_host_t *ha); static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause); static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, uint32_t create); static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id); static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, int rcv); static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode); static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id); static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr); static int qla_hw_add_all_mcast(qla_host_t *ha); static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds); static int qla_init_nic_func(qla_host_t *ha); static int qla_stop_nic_func(qla_host_t *ha); static int qla_query_fw_dcbx_caps(qla_host_t *ha); static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits); static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits); static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode); static int qla_get_cam_search_mode(qla_host_t *ha); static void ql_minidump_free(qla_host_t *ha); #ifdef QL_DBG static void qla_stop_pegs(qla_host_t *ha) { uint32_t val = 1; ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0); device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__); } static int 
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { qla_stop_pegs(ha); QLA_UNLOCK(ha, __func__); } } return err; } #endif /* #ifdef QL_DBG */ static int qla_validate_set_port_cfg_bit(uint32_t bits) { if ((bits & 0xF) > 1) return (-1); if (((bits >> 4) & 0xF) > 2) return (-1); if (((bits >> 8) & 0xF) > 2) return (-1); return (0); } static int qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; uint32_t cfg_bits; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) { err = qla_get_port_config(ha, &cfg_bits); if (err) goto qla_sysctl_set_port_cfg_exit; if (ret & 0x1) { cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE; } else { cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE; } ret = ret >> 4; cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK; if ((ret & 0xF) == 0) { cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED; } else if ((ret & 0xF) == 1){ cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD; } else { cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM; } ret = ret >> 4; cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK; if (ret == 0) { cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV; } else if (ret == 1){ cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT; } else { cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV; } if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_set_port_config(ha, cfg_bits); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } } else { if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_get_port_config(ha, &cfg_bits); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } } qla_sysctl_set_port_cfg_exit: return err; } static int qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) || (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) { if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_set_cam_search_mode(ha, (uint32_t)ret); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } } else { device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret); } return (err); } static int qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_get_cam_search_mode(ha); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } return (err); } static void qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *ctx_oid; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_mac"); children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_frames", CTLFLAG_RD, &ha->hw.mac.xmt_frames, "xmt_frames"); SYSCTL_ADD_QUAD(ctx, children, 
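/*
 * Editorial sketch (not part of the patch): the qlxgbe sysctl handlers above
 * (peg_stop, port_cfg, cam search mode) share one shape -- sysctl_handle_int()
 * copies the user's value, req->newptr distinguishes a read from a write, and
 * the hardware action only happens on a write. example_do_action() and
 * struct example_sc are hypothetical stand-ins.
 */
struct example_sc;
static int example_do_action(struct example_sc *);

static int
example_sysctl_trigger(SYSCTL_HANDLER_ARGS)
{
	struct example_sc *sc = arg1;
	int err, val = 0;

	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);	/* read request, or copyin failed: no action */

	if (val == 1)
		err = example_do_action(sc);
	return (err);
}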
OID_AUTO, "xmt_bytes", CTLFLAG_RD, &ha->hw.mac.xmt_bytes, "xmt_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_mcast_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts, "xmt_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_bcast_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts, "xmt_bcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pause_frames", CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames, "xmt_pause_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_cntrl_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts, "xmt_cntrl_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_64bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes, "xmt_pkt_lt_64bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_127bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes, "xmt_pkt_lt_127bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_255bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes, "xmt_pkt_lt_255bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_511bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes, "xmt_pkt_lt_511bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_1023bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes, "xmt_pkt_lt_1023bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_1518bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes, "xmt_pkt_lt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_gt_1518bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes, "xmt_pkt_gt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_frames", CTLFLAG_RD, &ha->hw.mac.rcv_frames, "rcv_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_bytes", CTLFLAG_RD, &ha->hw.mac.rcv_bytes, "rcv_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_mcast_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts, "rcv_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_bcast_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts, "rcv_bcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pause_frames", CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames, "rcv_pause_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_cntrl_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts, "rcv_cntrl_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_64bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes, "rcv_pkt_lt_64bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_127bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes, "rcv_pkt_lt_127bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_255bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes, "rcv_pkt_lt_255bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_511bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes, "rcv_pkt_lt_511bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_1023bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes, "rcv_pkt_lt_1023bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_1518bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes, "rcv_pkt_lt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_gt_1518bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes, "rcv_pkt_gt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_error", CTLFLAG_RD, &ha->hw.mac.rcv_len_error, "rcv_len_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_small", CTLFLAG_RD, &ha->hw.mac.rcv_len_small, "rcv_len_small"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_large", CTLFLAG_RD, &ha->hw.mac.rcv_len_large, "rcv_len_large"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_jabber", CTLFLAG_RD, &ha->hw.mac.rcv_jabber, 
"rcv_jabber"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_dropped", CTLFLAG_RD, &ha->hw.mac.rcv_dropped, "rcv_dropped"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "fcs_error", CTLFLAG_RD, &ha->hw.mac.fcs_error, "fcs_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "align_error", CTLFLAG_RD, &ha->hw.mac.align_error, "align_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_frames, "eswitched_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_bytes", CTLFLAG_RD, &ha->hw.mac.eswitched_bytes, "eswitched_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_mcast_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames, "eswitched_mcast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_bcast_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames, "eswitched_bcast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_ucast_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames, "eswitched_ucast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_err_free_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames, "eswitched_err_free_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_err_free_bytes", CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes, "eswitched_err_free_bytes"); return; } static void qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *ctx_oid; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_rcv"); children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "total_bytes", CTLFLAG_RD, &ha->hw.rcv.total_bytes, "total_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "total_pkts", CTLFLAG_RD, &ha->hw.rcv.total_pkts, "total_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_pkt_count", CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count, "lro_pkt_count"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "sw_pkt_count", CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count, "sw_pkt_count"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "ip_chksum_err", CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err, "ip_chksum_err"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_wo_acntxts", CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts, "pkts_wo_acntxts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_sds_card", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card, "pkts_dropped_no_sds_card"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_sds_host", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host, "pkts_dropped_no_sds_host"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "oversized_pkts", CTLFLAG_RD, &ha->hw.rcv.oversized_pkts, "oversized_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_rds", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds, "pkts_dropped_no_rds"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "unxpctd_mcast_pkts", CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts, "unxpctd_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "re1_fbq_error", CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error, "re1_fbq_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "invalid_mac_addr", CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr, "invalid_mac_addr"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rds_prime_trys", CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys, "rds_prime_trys"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rds_prime_success", CTLFLAG_RD, &ha->hw.rcv.rds_prime_success, 
"rds_prime_success"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_added", CTLFLAG_RD, &ha->hw.rcv.lro_flows_added, "lro_flows_added"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_deleted", CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted, "lro_flows_deleted"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_active", CTLFLAG_RD, &ha->hw.rcv.lro_flows_active, "lro_flows_active"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_droped_unknown", CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown, "pkts_droped_unknown"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_cnt_oversized", CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized, "pkts_cnt_oversized"); return; } static void qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_xmt"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); /* Tx Related */ SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "total_bytes", CTLFLAG_RD, &ha->hw.xmt[i].total_bytes, "total_bytes"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "total_pkts", CTLFLAG_RD, &ha->hw.xmt[i].total_pkts, "total_pkts"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "errors", CTLFLAG_RD, &ha->hw.xmt[i].errors, "errors"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "pkts_dropped", CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped, "pkts_dropped"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "switch_pkts", CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts, "switch_pkts"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "num_buffers", CTLFLAG_RD, &ha->hw.xmt[i].num_buffers, "num_buffers"); } return; } static void qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *node_children; ctx = device_get_sysctl_ctx(ha->pci_dev); node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_lt_200ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0], "mbx_completion_time_lt_200ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_200ms_400ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1], "mbx_completion_time_200ms_400ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_400ms_600ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2], "mbx_completion_time_400ms_600ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_600ms_800ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3], "mbx_completion_time_600ms_800ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_800ms_1000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4], "mbx_completion_time_800ms_1000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1000ms_1200ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5], "mbx_completion_time_1000ms_1200ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1200ms_1400ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6], "mbx_completion_time_1200ms_1400ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, 
"mbx_completion_time_1400ms_1600ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7], "mbx_completion_time_1400ms_1600ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1600ms_1800ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8], "mbx_completion_time_1600ms_1800ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1800ms_2000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9], "mbx_completion_time_1800ms_2000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2000ms_2200ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10], "mbx_completion_time_2000ms_2200ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2200ms_2400ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11], "mbx_completion_time_2200ms_2400ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2400ms_2600ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12], "mbx_completion_time_2400ms_2600ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2600ms_2800ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13], "mbx_completion_time_2600ms_2800ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2800ms_3000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14], "mbx_completion_time_2800ms_3000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_3000ms_4000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15], "mbx_completion_time_3000ms_4000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_4000ms_5000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16], "mbx_completion_time_4000ms_5000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17], "mbx_completion_host_mbx_cntrl_timeout"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18], "mbx_completion_fw_mbx_cntrl_timeout"); return; } static void qlnx_add_hw_stats_sysctls(qla_host_t *ha) { qlnx_add_hw_mac_stats_sysctls(ha); qlnx_add_hw_rcv_stats_sysctls(ha); qlnx_add_hw_xmt_stats_sysctls(ha); qlnx_add_hw_mbx_cmpl_stats_sysctls(ha); return; } static void qlnx_add_drvr_sds_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_sds"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_sds_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "intr_count", CTLFLAG_RD, &ha->hw.sds[i].intr_count, "intr_count"); SYSCTL_ADD_UINT(ctx, node_children, OID_AUTO, "rx_free", CTLFLAG_RD, &ha->hw.sds[i].rx_free, ha->hw.sds[i].rx_free, "rx_free"); } return; } static void qlnx_add_drvr_rds_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds", CTLFLAG_RD | 
CTLFLAG_MPSAFE, NULL, "stats_drvr_rds"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_rds_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "count", CTLFLAG_RD, &ha->hw.rds[i].count, "count"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_pkt_count", CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count, "lro_pkt_count"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_bytes", CTLFLAG_RD, &ha->hw.rds[i].lro_bytes, "lro_bytes"); } return; } static void qlnx_add_drvr_tx_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_xmt"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "count", CTLFLAG_RD, &ha->tx_ring[i].count, "count"); #ifdef QL_ENABLE_ISCSI_TLV SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "iscsi_pkt_count", CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count, "iscsi_pkt_count"); #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ } return; } static void qlnx_add_drvr_stats_sysctls(qla_host_t *ha) { qlnx_add_drvr_sds_stats(ha); qlnx_add_drvr_rds_stats(ha); qlnx_add_drvr_tx_stats(ha); return; } /* * Name: ql_hw_add_sysctls * Function: Add P3Plus specific sysctls */ void ql_hw_add_sysctls(qla_host_t *ha) { device_t dev; dev = ha->pci_dev; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings, ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings, ha->hw.num_sds_rings, "Number of Status Descriptor Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings, ha->hw.num_tx_rings, "Number of Transmit Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx, ha->txr_idx, "Tx Ring Used"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs, ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt"); ha->hw.sds_cidx_thres = 32; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres, ha->hw.sds_cidx_thres, "Number of SDS entries to process before updating" " SDS Ring Consumer Index"); ha->hw.rds_pidx_thres = 32; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, 
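/*
 * Editorial sketch (not part of the patch): the per-ring statistics above are
 * published by creating one numbered child node per ring ("0", "1", ...)
 * under a parent node and hanging SYSCTL_ADD_QUAD counters beneath each.
 * struct example_ring and its single counter are hypothetical.
 */
struct example_ring {
	uint64_t	count;
};

static void
example_add_ring_stats(device_t dev, struct example_ring *rings, int nrings)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *parent, *node;
	struct sysctl_oid *oid;
	char name[16];
	int i;

	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	oid = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats_example",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "per ring statistics");
	parent = SYSCTL_CHILDREN(oid);

	for (i = 0; i < nrings; i++) {
		snprintf(name, sizeof(name), "%d", i);
		oid = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, name,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name);
		node = SYSCTL_CHILDREN(oid);
		SYSCTL_ADD_QUAD(ctx, node, OID_AUTO, "count", CTLFLAG_RD,
		    &rings[i].count, "descriptors posted");
	}
}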
&ha->hw.rds_pidx_thres, ha->hw.rds_pidx_thres, "Number of Rcv Rings Entries to post before updating" " RDS Ring Producer Index"); ha->hw.rcv_intr_coalesce = (3 << 16) | 256; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW, &ha->hw.rcv_intr_coalesce, ha->hw.rcv_intr_coalesce, "Rcv Intr Coalescing Parameters\n" "\tbits 15:0 max packets\n" "\tbits 31:16 max micro-seconds to wait\n" "\tplease run\n" "\tifconfig down && ifconfig up\n" "\tto take effect \n"); ha->hw.xmt_intr_coalesce = (64 << 16) | 64; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW, &ha->hw.xmt_intr_coalesce, ha->hw.xmt_intr_coalesce, "Xmt Intr Coalescing Parameters\n" "\tbits 15:0 max packets\n" "\tbits 31:16 max micro-seconds to wait\n" "\tplease run\n" "\tifconfig down && ifconfig up\n" "\tto take effect \n"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_port_cfg, "I", "Set Port Configuration if values below " "otherwise Get Port Configuration\n" "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n" "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n" "\tBits 8-11: std pause cfg; 0 = xmt and rcv;" " 1 = xmt only; 2 = rcv only;\n"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_set_cam_search_mode, "I", "Set CAM Search Mode" "\t 1 = search mode internal\n" "\t 2 = search mode auto\n"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_get_cam_search_mode, "I", "Get CAM Search Mode" "\t 1 = search mode internal\n" "\t 2 = search mode auto\n"); ha->hw.enable_9kb = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb, ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000"); ha->hw.enable_hw_lro = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro, ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n" "\t 1 : Hardware LRO if LRO is enabled\n" "\t 0 : Software LRO if LRO is enabled\n" "\t Any change requires ifconfig down/up to take effect\n" "\t Note that LRO may be turned off/on via ifconfig\n"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index, ha->hw.sp_log_index, "sp_log_index"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop, ha->hw.sp_log_stop, "sp_log_stop"); ha->hw.sp_log_stop_events = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sp_log_stop_events", CTLFLAG_RW, &ha->hw.sp_log_stop_events, ha->hw.sp_log_stop_events, "Slow path event log is stopped" " when OR of the following events occur \n" "\t 0x01 : Heart beat Failure\n" "\t 0x02 : Temperature Failure\n" "\t 0x04 : HW Initialization Failure\n" "\t 0x08 : Interface Initialization Failure\n" "\t 0x10 : Error Recovery 
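/*
 * Editorial sketch (not part of the patch): rcv_intr_coalesce and
 * xmt_intr_coalesce above pack two fields into one 32-bit sysctl value --
 * bits 15:0 are the maximum packet count and bits 31:16 the maximum wait in
 * microseconds, so the defaults (3 << 16) | 256 and (64 << 16) | 64 encode
 * "3 us or 256 packets" and "64 us or 64 packets". Hypothetical helper.
 */
static inline uint32_t
example_coalesce_pack(uint32_t usecs, uint32_t pkts)
{
	return (((usecs & 0xFFFF) << 16) | (pkts & 0xFFFF));
}

/* default receive value: example_coalesce_pack(3, 256) == (3 << 16) | 256 */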
Failure\n"); ha->hw.mdump_active = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active, ha->hw.mdump_active, "Minidump retrieval is Active"); ha->hw.mdump_done = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "mdump_done", CTLFLAG_RW, &ha->hw.mdump_done, ha->hw.mdump_done, "Minidump has been done and available for retrieval"); ha->hw.mdump_capture_mask = 0xF; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "minidump_capture_mask", CTLFLAG_RW, &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask, "Minidump capture mask"); #ifdef QL_DBG ha->err_inject = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "err_inject", CTLFLAG_RW, &ha->err_inject, ha->err_inject, "Error to be injected\n" "\t\t\t 0: No Errors\n" "\t\t\t 1: rcv: rxb struct invalid\n" "\t\t\t 2: rcv: mp == NULL\n" "\t\t\t 3: lro: rxb struct invalid\n" "\t\t\t 4: lro: mp == NULL\n" "\t\t\t 5: rcv: num handles invalid\n" "\t\t\t 6: reg: indirect reg rd_wr failure\n" "\t\t\t 7: ocm: offchip memory rd_wr failure\n" "\t\t\t 8: mbx: mailbox command failure\n" "\t\t\t 9: heartbeat failure\n" "\t\t\t A: temperature failure\n" "\t\t\t 11: m_getcl or m_getjcl failure\n" "\t\t\t 13: Invalid Descriptor Count in SGL Receive\n" "\t\t\t 14: Invalid Descriptor Count in LRO Receive\n" "\t\t\t 15: peer port error recovery failure\n" "\t\t\t 16: tx_buf[next_prod_index].mbuf != NULL\n" ); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_stop_pegs, "I", "Peg Stop"); #endif /* #ifdef QL_DBG */ ha->hw.user_pri_nic = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic, ha->hw.user_pri_nic, "VLAN Tag User Priority for Normal Ethernet Packets"); ha->hw.user_pri_iscsi = 4; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi, ha->hw.user_pri_iscsi, "VLAN Tag User Priority for iSCSI Packets"); qlnx_add_hw_stats_sysctls(ha); qlnx_add_drvr_stats_sysctls(ha); return; } void ql_hw_link_status(qla_host_t *ha) { device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui); if (ha->hw.link_up) { device_printf(ha->pci_dev, "link Up\n"); } else { device_printf(ha->pci_dev, "link Down\n"); } if (ha->hw.fduplex) { device_printf(ha->pci_dev, "Full Duplex\n"); } else { device_printf(ha->pci_dev, "Half Duplex\n"); } if (ha->hw.autoneg) { device_printf(ha->pci_dev, "Auto Negotiation Enabled\n"); } else { device_printf(ha->pci_dev, "Auto Negotiation Disabled\n"); } switch (ha->hw.link_speed) { case 0x710: device_printf(ha->pci_dev, "link speed\t\t 10Gps\n"); break; case 0x3E8: device_printf(ha->pci_dev, "link speed\t\t 1Gps\n"); break; case 0x64: device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n"); break; default: device_printf(ha->pci_dev, "link speed\t\t Unknown\n"); break; } switch (ha->hw.module_type) { case 0x01: device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n"); break; case 0x02: device_printf(ha->pci_dev, "Module Type 10GBase-LR\n"); break; case 0x03: device_printf(ha->pci_dev, "Module Type 10GBase-SR\n"); break; case 0x04: device_printf(ha->pci_dev, "Module 
Type 10GE Passive Copper(Compliant)[%d m]\n", ha->hw.cable_length); break; case 0x05: device_printf(ha->pci_dev, "Module Type 10GE Active" " Limiting Copper(Compliant)[%d m]\n", ha->hw.cable_length); break; case 0x06: device_printf(ha->pci_dev, "Module Type 10GE Passive Copper" " (Legacy, Best Effort)[%d m]\n", ha->hw.cable_length); break; case 0x07: device_printf(ha->pci_dev, "Module Type 1000Base-SX\n"); break; case 0x08: device_printf(ha->pci_dev, "Module Type 1000Base-LX\n"); break; case 0x09: device_printf(ha->pci_dev, "Module Type 1000Base-CX\n"); break; case 0x0A: device_printf(ha->pci_dev, "Module Type 1000Base-T\n"); break; case 0x0B: device_printf(ha->pci_dev, "Module Type 1GE Passive Copper" "(Legacy, Best Effort)\n"); break; default: device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n", ha->hw.module_type); break; } if (ha->hw.link_faults == 1) device_printf(ha->pci_dev, "SFP Power Fault\n"); } /* * Name: ql_free_dma * Function: Frees the DMA'able memory allocated in ql_alloc_dma() */ void ql_free_dma(qla_host_t *ha) { uint32_t i; if (ha->hw.dma_buf.flags.sds_ring) { for (i = 0; i < ha->hw.num_sds_rings; i++) { ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]); } ha->hw.dma_buf.flags.sds_ring = 0; } if (ha->hw.dma_buf.flags.rds_ring) { for (i = 0; i < ha->hw.num_rds_rings; i++) { ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]); } ha->hw.dma_buf.flags.rds_ring = 0; } if (ha->hw.dma_buf.flags.tx_ring) { ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring); ha->hw.dma_buf.flags.tx_ring = 0; } ql_minidump_free(ha); } /* * Name: ql_alloc_dma * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. */ int ql_alloc_dma(qla_host_t *ha) { device_t dev; uint32_t i, j, size, tx_ring_size; qla_hw_t *hw; qla_hw_tx_cntxt_t *tx_cntxt; uint8_t *vaddr; bus_addr_t paddr; dev = ha->pci_dev; QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); hw = &ha->hw; /* * Allocate Transmit Ring */ tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS); size = (tx_ring_size * ha->hw.num_tx_rings); hw->dma_buf.tx_ring.alignment = 8; hw->dma_buf.tx_ring.size = size + PAGE_SIZE; if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) { device_printf(dev, "%s: tx ring alloc failed\n", __func__); goto ql_alloc_dma_exit; } vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b; paddr = hw->dma_buf.tx_ring.dma_addr; for (i = 0; i < ha->hw.num_tx_rings; i++) { tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr; tx_cntxt->tx_ring_paddr = paddr; vaddr += tx_ring_size; paddr += tx_ring_size; } for (i = 0; i < ha->hw.num_tx_rings; i++) { tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; tx_cntxt->tx_cons = (uint32_t *)vaddr; tx_cntxt->tx_cons_paddr = paddr; vaddr += sizeof (uint32_t); paddr += sizeof (uint32_t); } ha->hw.dma_buf.flags.tx_ring = 1; QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n", __func__, (void *)(hw->dma_buf.tx_ring.dma_addr), hw->dma_buf.tx_ring.dma_b)); /* * Allocate Receive Descriptor Rings */ for (i = 0; i < hw->num_rds_rings; i++) { hw->dma_buf.rds_ring[i].alignment = 8; hw->dma_buf.rds_ring[i].size = (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS; if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) { device_printf(dev, "%s: rds ring[%d] alloc failed\n", __func__, i); for (j = 0; j < i; j++) ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]); goto ql_alloc_dma_exit; } QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n", __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr), hw->dma_buf.rds_ring[i].dma_b)); } hw->dma_buf.flags.rds_ring = 1; /* * Allocate Status 
Descriptor Rings */ for (i = 0; i < hw->num_sds_rings; i++) { hw->dma_buf.sds_ring[i].alignment = 8; hw->dma_buf.sds_ring[i].size = (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS; if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) { device_printf(dev, "%s: sds ring alloc failed\n", __func__); for (j = 0; j < i; j++) ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]); goto ql_alloc_dma_exit; } QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n", __func__, i, (void *)(hw->dma_buf.sds_ring[i].dma_addr), hw->dma_buf.sds_ring[i].dma_b)); } for (i = 0; i < hw->num_sds_rings; i++) { hw->sds[i].sds_ring_base = (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b; } hw->dma_buf.flags.sds_ring = 1; return 0; ql_alloc_dma_exit: ql_free_dma(ha); return -1; } #define Q8_MBX_MSEC_DELAY 5000 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause) { uint32_t i; uint32_t data; int ret = 0; uint64_t start_usecs; uint64_t end_usecs; uint64_t msecs_200; ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]); if (ha->offline || ha->qla_initiate_recovery) { ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0); goto exit_qla_mbx_cmd; } if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) && (((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))|| !(ha->err_inject & ~0xFFFF))) { ret = -3; QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } start_usecs = qla_get_usec_timestamp(); if (no_pause) i = 1000; else i = Q8_MBX_MSEC_DELAY; while (i) { if (ha->qla_initiate_recovery) { ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); return (-1); } data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL); if (data == 0) break; if (no_pause) { DELAY(1000); } else { qla_mdelay(__func__, 1); } i--; } if (i == 0) { device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n", __func__, data); ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0); ret = -1; ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++; QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } for (i = 0; i < n_hmbox; i++) { WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox); h_mbox++; } WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1); i = Q8_MBX_MSEC_DELAY; while (i) { if (ha->qla_initiate_recovery) { ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); return (-1); } data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); if ((data & 0x3) == 1) { data = READ_REG32(ha, Q8_FW_MBOX0); if ((data & 0xF000) != 0x8000) break; } if (no_pause) { DELAY(1000); } else { qla_mdelay(__func__, 1); } i--; } if (i == 0) { device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n", __func__, data); ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0); ret = -2; ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++; QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } for (i = 0; i < n_fwmbox; i++) { if (ha->qla_initiate_recovery) { ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); return (-1); } *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2))); } WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); end_usecs = qla_get_usec_timestamp(); if (end_usecs > start_usecs) { msecs_200 = (end_usecs - start_usecs)/(1000 * 200); if (msecs_200 < 15) ha->hw.mbx_comp_msecs[msecs_200]++; else if (msecs_200 < 20) ha->hw.mbx_comp_msecs[15]++; else { device_printf(ha->pci_dev, "%s: [%ld, %ld] %ld\n", __func__, start_usecs, end_usecs, msecs_200); ha->hw.mbx_comp_msecs[16]++; } } ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]); exit_qla_mbx_cmd: return (ret); } 
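/*
 * Name: qla_get_nic_partition
 * Function: Issues the Q8_MBX_GET_NIC_PARTITION mailbox command and,
 *	on success, reports whether 9KB receive buffers are supported
 *	(bit 7 of mailbox 16) and the number of receive queues assigned
 *	to this function (mailbox 6, bits 31:16).
 */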
int qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb, uint32_t *num_rcvq) { uint32_t *mbox, err; device_t dev = ha->pci_dev; bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX)); mbox = ha->hw.mbox; mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } err = mbox[0] >> 25; if (supports_9kb != NULL) { if (mbox[16] & 0x80) /* bit 7 of mbox 16 */ *supports_9kb = 1; else *supports_9kb = 0; } if (num_rcvq != NULL) *num_rcvq = ((mbox[6] >> 16) & 0xFFFF); if ((err != 1) && (err != 0)) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, uint32_t create) { uint32_t i, err; device_t dev = ha->pci_dev; q80_config_intr_t *c_intr; q80_config_intr_rsp_t *c_intr_rsp; c_intr = (q80_config_intr_t *)ha->hw.mbox; bzero(c_intr, (sizeof (q80_config_intr_t))); c_intr->opcode = Q8_MBX_CONFIG_INTR; c_intr->count_version = (sizeof (q80_config_intr_t) >> 2); c_intr->count_version |= Q8_MBX_CMD_VERSION; c_intr->nentries = num_intrs; for (i = 0; i < num_intrs; i++) { if (create) { c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE; c_intr->intr[i].msix_index = start_idx + 1 + i; } else { c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE; c_intr->intr[i].msix_index = ha->hw.intr_id[(start_idx + i)]; } c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X; } if (qla_mbx_cmd(ha, (uint32_t *)c_intr, (sizeof (q80_config_intr_t) >> 2), ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) { device_printf(dev, "%s: %s failed0\n", __func__, (create ? "create" : "delete")); return (-1); } c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status); if (err) { device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__, (create ? "create" : "delete"), err, c_intr_rsp->nentries); for (i = 0; i < c_intr_rsp->nentries; i++) { device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n", __func__, i, c_intr_rsp->intr[i].status, c_intr_rsp->intr[i].intr_id, c_intr_rsp->intr[i].intr_src); } return (-1); } for (i = 0; ((i < num_intrs) && create); i++) { if (!c_intr_rsp->intr[i].status) { ha->hw.intr_id[(start_idx + i)] = c_intr_rsp->intr[i].intr_id; ha->hw.intr_src[(start_idx + i)] = c_intr_rsp->intr[i].intr_src; } } return (0); } /* * Name: qla_config_rss * Function: Configure RSS for the context/interface. 
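 * Issues Q8_MBX_CONFIG_RSS with a fixed 40-byte hash key (rss_key[])
 * and hashing enabled for IPv4/IPv6 TCP/IP; the RSS indirection table
 * itself is programmed separately by qla_set_rss_ind_table().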
*/ static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id) { q80_config_rss_t *c_rss; q80_config_rss_rsp_t *c_rss_rsp; uint32_t err, i; device_t dev = ha->pci_dev; c_rss = (q80_config_rss_t *)ha->hw.mbox; bzero(c_rss, (sizeof (q80_config_rss_t))); c_rss->opcode = Q8_MBX_CONFIG_RSS; c_rss->count_version = (sizeof (q80_config_rss_t) >> 2); c_rss->count_version |= Q8_MBX_CMD_VERSION; c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP | Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP); //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP | // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP); c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS; c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE; c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK; c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID; c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS; c_rss->cntxt_id = cntxt_id; for (i = 0; i < 5; i++) { c_rss->rss_key[i] = rss_key[i]; } if (qla_mbx_cmd(ha, (uint32_t *)c_rss, (sizeof (q80_config_rss_t) >> 2), ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } static int qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count, uint16_t cntxt_id, uint8_t *ind_table) { q80_config_rss_ind_table_t *c_rss_ind; q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp; uint32_t err; device_t dev = ha->pci_dev; if ((count > Q8_RSS_IND_TBL_SIZE) || ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) { device_printf(dev, "%s: illegal count [%d, %d]\n", __func__, start_idx, count); return (-1); } c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox; bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t)); c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE; c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2); c_rss_ind->count_version |= Q8_MBX_CMD_VERSION; c_rss_ind->start_idx = start_idx; c_rss_ind->end_idx = start_idx + count - 1; c_rss_ind->cntxt_id = cntxt_id; bcopy(ind_table, c_rss_ind->ind_table, count); if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind, (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox, (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } /* * Name: qla_config_intr_coalesce * Function: Configure Interrupt Coalescing. 
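 * The maximum packet count and maximum wait time are taken from the
 * rcv_intr_coalesce/xmt_intr_coalesce sysctls (bits 15:0 = max packets,
 * bits 31:16 = max wait); when 'tenable' is set a periodic timer
 * covering all SDS rings is enabled as well.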
*/ static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, int rcv) { q80_config_intr_coalesc_t *intrc; q80_config_intr_coalesc_rsp_t *intrc_rsp; uint32_t err, i; device_t dev = ha->pci_dev; intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox; bzero(intrc, (sizeof (q80_config_intr_coalesc_t))); intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE; intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2); intrc->count_version |= Q8_MBX_CMD_VERSION; if (rcv) { intrc->flags = Q8_MBX_INTRC_FLAGS_RCV; intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF; intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF; } else { intrc->flags = Q8_MBX_INTRC_FLAGS_XMT; intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF; intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF; } intrc->cntxt_id = cntxt_id; if (tenable) { intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC; intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC; for (i = 0; i < ha->hw.num_sds_rings; i++) { intrc->sds_ring_mask |= (1 << i); } intrc->ms_timeout = 1000; } if (qla_mbx_cmd(ha, (uint32_t *)intrc, (sizeof (q80_config_intr_coalesc_t) >> 2), ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } /* * Name: qla_config_mac_addr * Function: binds a MAC address to the context/interface. * Can be unicast, multicast or broadcast. */ static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac, uint32_t num_mac) { q80_config_mac_addr_t *cmac; q80_config_mac_addr_rsp_t *cmac_rsp; uint32_t err; device_t dev = ha->pci_dev; int i; uint8_t *mac_cpy = mac_addr; if (num_mac > Q8_MAX_MAC_ADDRS) { device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n", __func__, (add_mac ? "Add" : "Del"), num_mac); return (-1); } cmac = (q80_config_mac_addr_t *)ha->hw.mbox; bzero(cmac, (sizeof (q80_config_mac_addr_t))); cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR; cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2; cmac->count_version |= Q8_MBX_CMD_VERSION; if (add_mac) cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR; else cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR; cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS; cmac->nmac_entries = num_mac; cmac->cntxt_id = ha->hw.rcv_cntxt_id; for (i = 0; i < num_mac; i++) { bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); mac_addr = mac_addr + ETHER_ADDR_LEN; } if (qla_mbx_cmd(ha, (uint32_t *)cmac, (sizeof (q80_config_mac_addr_t) >> 2), ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) { device_printf(dev, "%s: %s failed0\n", __func__, (add_mac ? "Add" : "Del")); return (-1); } cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status); if (err) { device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__, (add_mac ? "Add" : "Del"), err); for (i = 0; i < num_mac; i++) { device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2], mac_cpy[3], mac_cpy[4], mac_cpy[5]); mac_cpy += ETHER_ADDR_LEN; } return (-1); } return 0; } /* * Name: qla_set_mac_rcv_mode * Function: Enable/Disable AllMulticast and Promiscous Modes. 
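 * Issues Q8_MBX_CONFIG_MAC_RX_MODE with the mode bits accumulated in
 * ha->hw.mac_rcv_mode; ql_set_promisc(), ql_set_allmulti() and their
 * reset counterparts below simply toggle the corresponding bit and
 * re-issue this command.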
*/ static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode) { q80_config_mac_rcv_mode_t *rcv_mode; uint32_t err; q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp; device_t dev = ha->pci_dev; rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox; bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t))); rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE; rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2; rcv_mode->count_version |= Q8_MBX_CMD_VERSION; rcv_mode->mode = mode; rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode, (sizeof (q80_config_mac_rcv_mode_t) >> 2), ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } int ql_set_promisc(qla_host_t *ha) { int ret; ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE; ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); return (ret); } void qla_reset_promisc(qla_host_t *ha) { ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE; (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); } int ql_set_allmulti(qla_host_t *ha) { int ret; ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE; ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); return (ret); } void qla_reset_allmulti(qla_host_t *ha) { ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE; (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); } /* * Name: ql_set_max_mtu * Function: * Sets the maximum transfer unit size for the specified rcv context. */ int ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id) { device_t dev; q80_set_max_mtu_t *max_mtu; q80_set_max_mtu_rsp_t *max_mtu_rsp; uint32_t err; dev = ha->pci_dev; max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox; bzero(max_mtu, (sizeof (q80_set_max_mtu_t))); max_mtu->opcode = Q8_MBX_SET_MAX_MTU; max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2); max_mtu->count_version |= Q8_MBX_CMD_VERSION; max_mtu->cntxt_id = cntxt_id; max_mtu->mtu = mtu; if (qla_mbx_cmd(ha, (uint32_t *)max_mtu, (sizeof (q80_set_max_mtu_t) >> 2), ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) { device_printf(dev, "%s: failed\n", __func__); return -1; } max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id) { device_t dev; q80_link_event_t *lnk; q80_link_event_rsp_t *lnk_rsp; uint32_t err; dev = ha->pci_dev; lnk = (q80_link_event_t *)ha->hw.mbox; bzero(lnk, (sizeof (q80_link_event_t))); lnk->opcode = Q8_MBX_LINK_EVENT_REQ; lnk->count_version = (sizeof (q80_link_event_t) >> 2); lnk->count_version |= Q8_MBX_CMD_VERSION; lnk->cntxt_id = cntxt_id; lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC; if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2), ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id) { device_t dev; q80_config_fw_lro_t *fw_lro; q80_config_fw_lro_rsp_t *fw_lro_rsp; uint32_t err; dev = ha->pci_dev; fw_lro = 
(q80_config_fw_lro_t *)ha->hw.mbox; bzero(fw_lro, sizeof(q80_config_fw_lro_t)); fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO; fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2); fw_lro->count_version |= Q8_MBX_CMD_VERSION; fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK; fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK; fw_lro->cntxt_id = cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)fw_lro, (sizeof (q80_config_fw_lro_t) >> 2), ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode) { device_t dev; q80_hw_config_t *hw_config; q80_hw_config_rsp_t *hw_config_rsp; uint32_t err; dev = ha->pci_dev; hw_config = (q80_hw_config_t *)ha->hw.mbox; bzero(hw_config, sizeof (q80_hw_config_t)); hw_config->opcode = Q8_MBX_HW_CONFIG; hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT; hw_config->count_version |= Q8_MBX_CMD_VERSION; hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE; hw_config->u.set_cam_search_mode.mode = search_mode; if (qla_mbx_cmd(ha, (uint32_t *)hw_config, (sizeof (q80_hw_config_t) >> 2), ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_get_cam_search_mode(qla_host_t *ha) { device_t dev; q80_hw_config_t *hw_config; q80_hw_config_rsp_t *hw_config_rsp; uint32_t err; dev = ha->pci_dev; hw_config = (q80_hw_config_t *)ha->hw.mbox; bzero(hw_config, sizeof (q80_hw_config_t)); hw_config->opcode = Q8_MBX_HW_CONFIG; hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT; hw_config->count_version |= Q8_MBX_CMD_VERSION; hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE; if (qla_mbx_cmd(ha, (uint32_t *)hw_config, (sizeof (q80_hw_config_t) >> 2), ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } else { device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__, hw_config_rsp->u.get_cam_search_mode.mode); } return 0; } static int qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size) { device_t dev; q80_get_stats_t *stat; q80_get_stats_rsp_t *stat_rsp; uint32_t err; dev = ha->pci_dev; stat = (q80_get_stats_t *)ha->hw.mbox; bzero(stat, (sizeof (q80_get_stats_t))); stat->opcode = Q8_MBX_GET_STATS; stat->count_version = 2; stat->count_version |= Q8_MBX_CMD_VERSION; stat->cmd = cmd; if (qla_mbx_cmd(ha, (uint32_t *)stat, 2, ha->hw.mbox, (rsp_size >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status); if (err) { return -1; } return 0; } void ql_get_stats(qla_host_t *ha) { q80_get_stats_rsp_t *stat_rsp; q80_mac_stats_t *mstat; q80_xmt_stats_t *xstat; q80_rcv_stats_t *rstat; uint32_t cmd; int i; struct ifnet *ifp = ha->ifp; if (ifp == NULL) return; if (QLA_LOCK(ha, __func__, 
QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return; } if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { QLA_UNLOCK(ha, __func__); return; } stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; /* * Get MAC Statistics */ cmd = Q8_GET_STATS_CMD_TYPE_MAC; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= ((ha->pci_func & 0x1) << 16); if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || ha->offline) goto ql_get_stats_exit; if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { mstat = (q80_mac_stats_t *)&stat_rsp->u.mac; bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t)); } else { device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } /* * Get RCV Statistics */ cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= (ha->hw.rcv_cntxt_id << 16); if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || ha->offline) goto ql_get_stats_exit; if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv; bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t)); } else { device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || ha->offline) goto ql_get_stats_exit; /* * Get XMT Statistics */ for (i = 0 ; (i < ha->hw.num_tx_rings); i++) { if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || ha->offline) goto ql_get_stats_exit; cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16); if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) == 0) { xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt; bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t)); } else { device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } } ql_get_stats_exit: QLA_UNLOCK(ha, __func__); return; } /* * Name: qla_tx_tso * Function: Checks if the packet to be transmitted is a candidate for * Large TCP Segment Offload. If yes, the appropriate fields in the Tx * Ring Structure are plugged in. 
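 * Returns 0 if the headers are available in the first mbuf, 1 if the
 * caller must instead copy the header bytes assembled in 'hdr', and -1
 * if the frame is not a TSO candidate (non-TCP payload or IPv4 header
 * options present).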
*/ static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) { struct ether_vlan_header *eh; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; struct tcphdr *th = NULL; uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off; uint16_t etype, opcode, offload = 1; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } hdrlen = 0; switch (etype) { case ETHERTYPE_IP: tcp_opt_off = ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr); if (mp->m_len < tcp_opt_off) { m_copydata(mp, 0, tcp_opt_off, hdr); ip = (struct ip *)(hdr + ehdrlen); } else { ip = (struct ip *)(mp->m_data + ehdrlen); } ip_hlen = ip->ip_hl << 2; opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; if ((ip->ip_p != IPPROTO_TCP) || (ip_hlen != sizeof (struct ip))){ /* IP Options are not supported */ offload = 0; } else th = (struct tcphdr *)((caddr_t)ip + ip_hlen); break; case ETHERTYPE_IPV6: tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) + sizeof (struct tcphdr); if (mp->m_len < tcp_opt_off) { m_copydata(mp, 0, tcp_opt_off, hdr); ip6 = (struct ip6_hdr *)(hdr + ehdrlen); } else { ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); } ip_hlen = sizeof(struct ip6_hdr); opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6; if (ip6->ip6_nxt != IPPROTO_TCP) { //device_printf(dev, "%s: ipv6\n", __func__); offload = 0; } else th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); break; default: QL_DPRINT8(ha, (ha->pci_dev, "%s: type!=ip\n", __func__)); offload = 0; break; } if (!offload) return (-1); tcp_hlen = th->th_off << 2; hdrlen = ehdrlen + ip_hlen + tcp_hlen; if (mp->m_len < hdrlen) { if (mp->m_len < tcp_opt_off) { if (tcp_hlen > sizeof(struct tcphdr)) { m_copydata(mp, tcp_opt_off, (tcp_hlen - sizeof(struct tcphdr)), &hdr[tcp_opt_off]); } } else { m_copydata(mp, 0, hdrlen, hdr); } } tx_cmd->mss = mp->m_pkthdr.tso_segsz; tx_cmd->flags_opcode = opcode ; tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; tx_cmd->total_hdr_len = hdrlen; /* Check for Multicast least significant bit of MSB == 1 */ if (eh->evl_dhost[0] & 0x01) { tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST; } if (mp->m_len < hdrlen) { printf("%d\n", hdrlen); return (1); } return (0); } /* * Name: qla_tx_chksum * Function: Checks if the packet to be transmitted is a candidate for * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx * Ring Structure are plugged in. 
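 * Returns -1 if no TCP/UDP checksum offload is requested or possible;
 * otherwise returns 0 and passes the transmit opcode and the TCP/UDP
 * header offset back through 'op_code' and 'tcp_hdr_off'.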
*/ static int qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code, uint32_t *tcp_hdr_off) { struct ether_vlan_header *eh; struct ip *ip; struct ip6_hdr *ip6; uint32_t ehdrlen, ip_hlen; uint16_t etype, opcode, offload = 1; uint8_t buf[sizeof(struct ip6_hdr)]; *op_code = 0; if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0) return (-1); eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = sizeof (struct ip); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof(struct ip), buf); ip = (struct ip *)buf; } if (ip->ip_p == IPPROTO_TCP) opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM; else if (ip->ip_p == IPPROTO_UDP) opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM; else { //device_printf(dev, "%s: ipv4\n", __func__); offload = 0; } break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), buf); ip6 = (struct ip6_hdr *)buf; } if (ip6->ip6_nxt == IPPROTO_TCP) opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6; else if (ip6->ip6_nxt == IPPROTO_UDP) opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6; else { //device_printf(dev, "%s: ipv6\n", __func__); offload = 0; } break; default: offload = 0; break; } if (!offload) return (-1); *op_code = opcode; *tcp_hdr_off = (ip_hlen + ehdrlen); return (0); } #define QLA_TX_MIN_FREE 2 /* * Name: ql_hw_send * Function: Transmits a packet. It first checks if the packet is a * candidate for Large TCP Segment Offload and then for UDP/TCP checksum * offload. If either of these creteria are not met, it is transmitted * as a regular ethernet frame. 
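 * For TSO frames the packet headers are replicated into the Tx command
 * descriptors that follow the first one. The frame is rejected if, even
 * after reaping completions, the ring does not hold more than
 * (num_tx_cmds + QLA_TX_MIN_FREE) free entries.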
*/ int ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu) { struct ether_vlan_header *eh; qla_hw_t *hw = &ha->hw; q80_tx_cmd_t *tx_cmd, tso_cmd; bus_dma_segment_t *c_seg; uint32_t num_tx_cmds, hdr_len = 0; uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next; device_t dev; int i, ret; uint8_t *src = NULL, *dst = NULL; uint8_t frame_hdr[QL_FRAME_HDR_SIZE]; uint32_t op_code = 0; uint32_t tcp_hdr_off = 0; dev = ha->pci_dev; /* * Always make sure there is atleast one empty slot in the tx_ring * tx_ring is considered full when there only one entry available */ num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; total_length = mp->m_pkthdr.len; if (total_length > QLA_MAX_TSO_FRAME_SIZE) { device_printf(dev, "%s: total length exceeds maxlen(%d)\n", __func__, total_length); return (EINVAL); } eh = mtod(mp, struct ether_vlan_header *); if (mp->m_pkthdr.csum_flags & CSUM_TSO) { bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t)); src = frame_hdr; ret = qla_tx_tso(ha, mp, &tso_cmd, src); if (!(ret & ~1)) { /* find the additional tx_cmd descriptors required */ if (mp->m_flags & M_VLANTAG) tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN; hdr_len = tso_cmd.total_hdr_len; bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; bytes = QL_MIN(bytes, hdr_len); num_tx_cmds++; hdr_len -= bytes; while (hdr_len) { bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); hdr_len -= bytes; num_tx_cmds++; } hdr_len = tso_cmd.total_hdr_len; if (ret == 0) src = (uint8_t *)eh; } else return (EINVAL); } else { (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off); } if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { ql_hw_tx_done_locked(ha, txr_idx); if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= " "(num_tx_cmds + QLA_TX_MIN_FREE))\n", __func__)); return (-1); } } for (i = 0; i < num_tx_cmds; i++) { int j; j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1); if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) { QL_ASSERT(ha, 0, \ ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\ __func__, __LINE__, txr_idx, j,\ ha->tx_ring[txr_idx].tx_buf[j].m_head)); return (EINVAL); } } tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx]; if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) { if (nsegs > ha->hw.max_tx_segs) ha->hw.max_tx_segs = nsegs; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); if (op_code) { tx_cmd->flags_opcode = op_code; tx_cmd->tcp_hdr_off = tcp_hdr_off; } else { tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER; } } else { bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t)); ha->tx_tso_frames++; } if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED; if (iscsi_pdu) eh->evl_tag |= ha->hw.user_pri_iscsi << 13; } else if (mp->m_flags & M_VLANTAG) { if (hdr_len) { /* TSO */ tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED | Q8_TX_CMD_FLAGS_HW_VLAN_ID); tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN; } else tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID; ha->hw_vlan_tx_frames++; tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag; if (iscsi_pdu) { tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13; mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci; } } tx_cmd->n_bufs = (uint8_t)nsegs; tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF); tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8))); tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func); c_seg = segs; while (1) { for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); 
i++) { switch (i) { case 0: tx_cmd->buf1_addr = c_seg->ds_addr; tx_cmd->buf1_len = c_seg->ds_len; break; case 1: tx_cmd->buf2_addr = c_seg->ds_addr; tx_cmd->buf2_len = c_seg->ds_len; break; case 2: tx_cmd->buf3_addr = c_seg->ds_addr; tx_cmd->buf3_len = c_seg->ds_len; break; case 3: tx_cmd->buf4_addr = c_seg->ds_addr; tx_cmd->buf4_len = c_seg->ds_len; break; } c_seg++; nsegs--; } txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; if (!nsegs) break; tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); } if (mp->m_pkthdr.csum_flags & CSUM_TSO) { /* TSO : Copy the header in the following tx cmd descriptors */ txr_next = hw->tx_cntxt[txr_idx].txr_next; tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; bytes = QL_MIN(bytes, hdr_len); dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN; if (mp->m_flags & M_VLANTAG) { /* first copy the src/dst MAC addresses */ bcopy(src, dst, (ETHER_ADDR_LEN * 2)); dst += (ETHER_ADDR_LEN * 2); src += (ETHER_ADDR_LEN * 2); *((uint16_t *)dst) = htons(ETHERTYPE_VLAN); dst += 2; *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag); dst += 2; /* bytes left in src header */ hdr_len -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); /* bytes left in TxCmd Entry */ bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); bcopy(src, dst, bytes); src += bytes; hdr_len -= bytes; } else { bcopy(src, dst, bytes); src += bytes; hdr_len -= bytes; } txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; while (hdr_len) { tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); bcopy(src, tx_cmd, bytes); src += bytes; hdr_len -= bytes; txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; } } hw->tx_cntxt[txr_idx].txr_free = hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count; QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\ txr_idx); QL_DPRINT8(ha, (dev, "%s: return\n", __func__)); return (0); } #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */ static int qla_config_rss_ind_table(qla_host_t *ha) { uint32_t i, count; uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE]; for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) { rss_ind_tbl[i] = i % ha->hw.num_sds_rings; } for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; i = i + Q8_CONFIG_IND_TBL_SIZE) { if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) { count = Q8_RSS_IND_TBL_MAX_IDX - i + 1; } else { count = Q8_CONFIG_IND_TBL_SIZE; } if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id, rss_ind_tbl)) return (-1); } return (0); } static int qla_config_soft_lro(qla_host_t *ha) { +#if defined(INET) || defined(INET6) int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; bzero(lro, sizeof(struct lro_ctrl)); #if (__FreeBSD_version >= 1100101) if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) { device_printf(ha->pci_dev, "%s: tcp_lro_init_args [%d] failed\n", __func__, i); return (-1); } #else if (tcp_lro_init(lro)) { device_printf(ha->pci_dev, "%s: tcp_lro_init [%d] failed\n", __func__, i); return (-1); } #endif /* #if (__FreeBSD_version >= 1100101) */ lro->ifp = ha->ifp; } QL_DPRINT2(ha, 
(ha->pci_dev, "%s: LRO initialized\n", __func__)); +#endif return (0); } static void qla_drain_soft_lro(qla_host_t *ha) { +#if defined(INET) || defined(INET6) int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; #if (__FreeBSD_version >= 1100101) tcp_lro_flush_all(lro); #else struct lro_entry *queued; while ((!SLIST_EMPTY(&lro->lro_active))) { queued = SLIST_FIRST(&lro->lro_active); SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif /* #if (__FreeBSD_version >= 1100101) */ } +#endif return; } static void qla_free_soft_lro(qla_host_t *ha) { +#if defined(INET) || defined(INET6) int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; tcp_lro_free(lro); } +#endif return; } /* * Name: ql_del_hw_if * Function: Destroys the hardware specific entities corresponding to an * Ethernet Interface */ void ql_del_hw_if(qla_host_t *ha) { uint32_t i; uint32_t num_msix; (void)qla_stop_nic_func(ha); qla_del_rcv_cntxt(ha); if(qla_del_xmt_cntxt(ha)) goto ql_del_hw_if_exit; if (ha->hw.flags.init_intr_cnxt) { for (i = 0; i < ha->hw.num_sds_rings; ) { if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) num_msix = Q8_MAX_INTR_VECTORS; else num_msix = ha->hw.num_sds_rings - i; if (qla_config_intr_cntxt(ha, i, num_msix, 0)) break; i += num_msix; } ha->hw.flags.init_intr_cnxt = 0; } ql_del_hw_if_exit: if (ha->hw.enable_soft_lro) { qla_drain_soft_lro(ha); qla_free_soft_lro(ha); } return; } void qla_confirm_9kb_enable(qla_host_t *ha) { // uint32_t supports_9kb = 0; ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX); /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */ WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2); WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); #if 0 qla_get_nic_partition(ha, &supports_9kb, NULL); if (!supports_9kb) #endif ha->hw.enable_9kb = 0; return; } /* * Name: ql_init_hw_if * Function: Creates the hardware specific entities corresponding to an * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address * corresponding to the interface. Enables LRO if allowed. 
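 * The sequence is: configure the MSI-X interrupt contexts, create the
 * receive context and post the initial RDS producer indices, create the
 * transmit contexts, program the unicast/broadcast/multicast MAC
 * addresses, set the maximum MTU, configure RSS and interrupt
 * coalescing, request asynchronous link events, and finally select
 * hardware or software LRO based on the enable_hw_lro sysctl.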
*/ int ql_init_hw_if(qla_host_t *ha) { uint32_t i; uint8_t bcast_mac[6]; qla_rdesc_t *rdesc; uint32_t num_msix; for (i = 0; i < ha->hw.num_sds_rings; i++) { bzero(ha->hw.dma_buf.sds_ring[i].dma_b, ha->hw.dma_buf.sds_ring[i].size); } for (i = 0; i < ha->hw.num_sds_rings; ) { if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) num_msix = Q8_MAX_INTR_VECTORS; else num_msix = ha->hw.num_sds_rings - i; if (qla_config_intr_cntxt(ha, i, num_msix, 1)) { if (i > 0) { num_msix = i; for (i = 0; i < num_msix; ) { qla_config_intr_cntxt(ha, i, Q8_MAX_INTR_VECTORS, 0); i += Q8_MAX_INTR_VECTORS; } } return (-1); } i = i + num_msix; } ha->hw.flags.init_intr_cnxt = 1; /* * Create Receive Context */ if (qla_init_rcv_cntxt(ha)) { return (-1); } for (i = 0; i < ha->hw.num_rds_rings; i++) { rdesc = &ha->hw.rds[i]; rdesc->rx_next = NUM_RX_DESCRIPTORS - 2; rdesc->rx_in = 0; /* Update the RDS Producer Indices */ QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\ rdesc->rx_next); } /* * Create Transmit Context */ if (qla_init_xmt_cntxt(ha)) { qla_del_rcv_cntxt(ha); return (-1); } ha->hw.max_tx_segs = 0; if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1)) return(-1); ha->hw.flags.unicast_mac = 1; bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; if (qla_config_mac_addr(ha, bcast_mac, 1, 1)) return (-1); ha->hw.flags.bcast_mac = 1; /* * program any cached multicast addresses */ if (qla_hw_add_all_mcast(ha)) return (-1); if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id)) return (-1); if (qla_config_rss(ha, ha->hw.rcv_cntxt_id)) return (-1); if (qla_config_rss_ind_table(ha)) return (-1); if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1)) return (-1); if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id)) return (-1); if (ha->ifp->if_capenable & IFCAP_LRO) { if (ha->hw.enable_hw_lro) { ha->hw.enable_soft_lro = 0; if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id)) return (-1); } else { ha->hw.enable_soft_lro = 1; if (qla_config_soft_lro(ha)) return (-1); } } if (qla_init_nic_func(ha)) return (-1); if (qla_query_fw_dcbx_caps(ha)) return (-1); for (i = 0; i < ha->hw.num_sds_rings; i++) QL_ENABLE_INTERRUPTS(ha, i); return (0); } static int qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx) { device_t dev = ha->pci_dev; q80_rq_map_sds_to_rds_t *map_rings; q80_rsp_map_sds_to_rds_t *map_rings_rsp; uint32_t i, err; qla_hw_t *hw = &ha->hw; map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox; bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t)); map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS; map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2); map_rings->count_version |= Q8_MBX_CMD_VERSION; map_rings->cntxt_id = hw->rcv_cntxt_id; map_rings->num_rings = num_idx; for (i = 0; i < num_idx; i++) { map_rings->sds_rds[i].sds_ring = i + start_idx; map_rings->sds_rds[i].rds_ring = i + start_idx; } if (qla_mbx_cmd(ha, (uint32_t *)map_rings, (sizeof (q80_rq_map_sds_to_rds_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return (0); } /* * Name: qla_init_rcv_cntxt * Function: Creates the Receive Context. 
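 * Status descriptor entries are seeded with a non-zero pattern before
 * the Q8_MBX_CREATE_RX_CNTXT request is issued. If the number of SDS
 * rings exceeds MAX_RCNTXT_SDS_RINGS, the remainder are added via
 * qla_add_rcv_rings(); when multiple RDS rings are in use they are
 * mapped to SDS rings via qla_map_sds_to_rds().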
*/ static int qla_init_rcv_cntxt(qla_host_t *ha) { q80_rq_rcv_cntxt_t *rcntxt; q80_rsp_rcv_cntxt_t *rcntxt_rsp; q80_stat_desc_t *sdesc; int i, j; qla_hw_t *hw = &ha->hw; device_t dev; uint32_t err; uint32_t rcntxt_sds_rings; uint32_t rcntxt_rds_rings; uint32_t max_idx; dev = ha->pci_dev; /* * Create Receive Context */ for (i = 0; i < hw->num_sds_rings; i++) { sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { sdesc->data[0] = 1ULL; sdesc->data[1] = 1ULL; } } rcntxt_sds_rings = hw->num_sds_rings; if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS; rcntxt_rds_rings = hw->num_rds_rings; if (hw->num_rds_rings > MAX_RDS_RING_SETS) rcntxt_rds_rings = MAX_RDS_RING_SETS; rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox; bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t))); rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT; rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2); rcntxt->count_version |= Q8_MBX_CMD_VERSION; rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW | Q8_RCV_CNTXT_CAP0_LRO | Q8_RCV_CNTXT_CAP0_HW_LRO | Q8_RCV_CNTXT_CAP0_RSS | Q8_RCV_CNTXT_CAP0_SGL_LRO; if (ha->hw.enable_9kb) rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO; else rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO; if (ha->hw.num_rds_rings > 1) { rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5); rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS; } else rcntxt->nrds_sets_rings = 0x1 | (1 << 5); rcntxt->nsds_rings = rcntxt_sds_rings; rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE; rcntxt->rcv_vpid = 0; for (i = 0; i < rcntxt_sds_rings; i++) { rcntxt->sds[i].paddr = qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); rcntxt->sds[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]); rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); } for (i = 0; i < rcntxt_rds_rings; i++) { rcntxt->rds[i].paddr_std = qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); if (ha->hw.enable_9kb) rcntxt->rds[i].std_bsize = qla_host_to_le64(MJUM9BYTES); else rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); rcntxt->rds[i].std_nentries = qla_host_to_le32(NUM_RX_DESCRIPTORS); } if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, (sizeof (q80_rq_rcv_cntxt_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } for (i = 0; i < rcntxt_sds_rings; i++) { hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i]; } for (i = 0; i < rcntxt_rds_rings; i++) { hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std; } hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id; ha->hw.flags.init_rx_cnxt = 1; if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) { for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) { if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings) max_idx = MAX_RCNTXT_SDS_RINGS; else max_idx = hw->num_sds_rings - i; err = qla_add_rcv_rings(ha, i, max_idx); if (err) return -1; i += max_idx; } } if (hw->num_rds_rings > 1) { for (i = 0; i < hw->num_rds_rings; ) { if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings) max_idx = MAX_SDS_TO_RDS_MAP; else max_idx = hw->num_rds_rings - i; err = qla_map_sds_to_rds(ha, i, max_idx); if (err) return -1; i += max_idx; } } return (0); } static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds) { device_t dev = ha->pci_dev; q80_rq_add_rcv_rings_t 
*add_rcv; q80_rsp_add_rcv_rings_t *add_rcv_rsp; uint32_t i,j, err; qla_hw_t *hw = &ha->hw; add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox; bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t)); add_rcv->opcode = Q8_MBX_ADD_RX_RINGS; add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2); add_rcv->count_version |= Q8_MBX_CMD_VERSION; add_rcv->nrds_sets_rings = nsds | (1 << 5); add_rcv->nsds_rings = nsds; add_rcv->cntxt_id = hw->rcv_cntxt_id; for (i = 0; i < nsds; i++) { j = i + sds_idx; add_rcv->sds[i].paddr = qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr); add_rcv->sds[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]); add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); } for (i = 0; (i < nsds); i++) { j = i + sds_idx; add_rcv->rds[i].paddr_std = qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr); if (ha->hw.enable_9kb) add_rcv->rds[i].std_bsize = qla_host_to_le64(MJUM9BYTES); else add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); add_rcv->rds[i].std_nentries = qla_host_to_le32(NUM_RX_DESCRIPTORS); } if (qla_mbx_cmd(ha, (uint32_t *)add_rcv, (sizeof (q80_rq_add_rcv_rings_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } for (i = 0; i < nsds; i++) { hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i]; } for (i = 0; i < nsds; i++) { hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std; } return (0); } /* * Name: qla_del_rcv_cntxt * Function: Destroys the Receive Context. */ static void qla_del_rcv_cntxt(qla_host_t *ha) { device_t dev = ha->pci_dev; q80_rcv_cntxt_destroy_t *rcntxt; q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp; uint32_t err; uint8_t bcast_mac[6]; if (!ha->hw.flags.init_rx_cnxt) return; if (qla_hw_del_all_mcast(ha)) return; if (ha->hw.flags.bcast_mac) { bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; if (qla_config_mac_addr(ha, bcast_mac, 0, 1)) return; ha->hw.flags.bcast_mac = 0; } if (ha->hw.flags.unicast_mac) { if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1)) return; ha->hw.flags.unicast_mac = 0; } rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox; bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t))); rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT; rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2); rcntxt->count_version |= Q8_MBX_CMD_VERSION; rcntxt->cntxt_id = ha->hw.rcv_cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, (sizeof (q80_rcv_cntxt_destroy_t) >> 2), ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return; } rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); } ha->hw.flags.init_rx_cnxt = 0; return; } /* * Name: qla_init_xmt_cntxt * Function: Creates the Transmit Context. 
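 * A Q8_MBX_CREATE_TX_CNTXT request is issued per transmit ring; with
 * QL_ENABLE_ISCSI_TLV defined the upper half of the rings is placed in
 * traffic class 1. Interrupt coalescing is configured for each transmit
 * context as it is created.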
*/ static int qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) { device_t dev; qla_hw_t *hw = &ha->hw; q80_rq_tx_cntxt_t *tcntxt; q80_rsp_tx_cntxt_t *tcntxt_rsp; uint32_t err; qla_hw_tx_cntxt_t *hw_tx_cntxt; uint32_t intr_idx; hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; dev = ha->pci_dev; /* * Create Transmit Context */ tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox; bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t))); tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT; tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2); tcntxt->count_version |= Q8_MBX_CMD_VERSION; intr_idx = txr_idx; #ifdef QL_ENABLE_ISCSI_TLV tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO | Q8_TX_CNTXT_CAP0_TC; if (txr_idx >= (ha->hw.num_tx_rings >> 1)) { tcntxt->traffic_class = 1; } intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1); #else tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO; #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ tcntxt->ntx_rings = 1; tcntxt->tx_ring[0].paddr = qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr); tcntxt->tx_ring[0].tx_consumer = qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr); tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS); tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]); tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0); hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS; hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0; *(hw_tx_cntxt->tx_cons) = 0; if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, (sizeof (q80_rq_tx_cntxt_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return -1; } hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index; hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id; if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0)) return (-1); return (0); } /* * Name: qla_del_xmt_cntxt * Function: Destroys the Transmit Context. 
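 * qla_del_xmt_cntxt_i() destroys a single ring with
 * Q8_MBX_DESTROY_TX_CNTXT; qla_del_xmt_cntxt() walks all transmit
 * rings, stops at the first failure and clears flags.init_tx_cnxt.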
*/ static int qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) { device_t dev = ha->pci_dev; q80_tx_cntxt_destroy_t *tcntxt; q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp; uint32_t err; tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox; bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t))); tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT; tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2); tcntxt->count_version |= Q8_MBX_CMD_VERSION; tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, (sizeof (q80_tx_cntxt_destroy_t) >> 2), ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return (0); } static int qla_del_xmt_cntxt(qla_host_t *ha) { uint32_t i; int ret = 0; if (!ha->hw.flags.init_tx_cnxt) return (ret); for (i = 0; i < ha->hw.num_tx_rings; i++) { if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0) break; } ha->hw.flags.init_tx_cnxt = 0; return (ret); } static int qla_init_xmt_cntxt(qla_host_t *ha) { uint32_t i, j; for (i = 0; i < ha->hw.num_tx_rings; i++) { if (qla_init_xmt_cntxt_i(ha, i) != 0) { for (j = 0; j < i; j++) { if (qla_del_xmt_cntxt_i(ha, j)) break; } return (-1); } } ha->hw.flags.init_tx_cnxt = 1; return (0); } static int qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast) { int i, nmcast; uint32_t count = 0; uint8_t *mcast; nmcast = ha->hw.nmcast; QL_DPRINT2(ha, (ha->pci_dev, "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast)); mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { if ((ha->hw.mcast[i].addr[0] != 0) || (ha->hw.mcast[i].addr[1] != 0) || (ha->hw.mcast[i].addr[2] != 0) || (ha->hw.mcast[i].addr[3] != 0) || (ha->hw.mcast[i].addr[4] != 0) || (ha->hw.mcast[i].addr[5] != 0)) { bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; device_printf(ha->pci_dev, "%s: %x:%x:%x:%x:%x:%x \n", __func__, ha->hw.mcast[i].addr[0], ha->hw.mcast[i].addr[1], ha->hw.mcast[i].addr[2], ha->hw.mcast[i].addr[3], ha->hw.mcast[i].addr[4], ha->hw.mcast[i].addr[5]); if (count == Q8_MAX_MAC_ADDRS) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } count = 0; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); } nmcast--; } } if (count) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } } QL_DPRINT2(ha, (ha->pci_dev, "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast)); return 0; } static int qla_hw_add_all_mcast(qla_host_t *ha) { int ret; ret = qla_hw_all_mcast(ha, 1); return (ret); } int qla_hw_del_all_mcast(qla_host_t *ha) { int ret; ret = qla_hw_all_mcast(ha, 0); bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS)); ha->hw.nmcast = 0; return (ret); } static int qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) return (0); /* its been already added */ } return (-1); } static int qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if 
((ha->hw.mcast[i].addr[0] == 0) && (ha->hw.mcast[i].addr[1] == 0) && (ha->hw.mcast[i].addr[2] == 0) && (ha->hw.mcast[i].addr[3] == 0) && (ha->hw.mcast[i].addr[4] == 0) && (ha->hw.mcast[i].addr[5] == 0)) { bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN); ha->hw.nmcast++; mta = mta + ETHER_ADDR_LEN; nmcast--; if (nmcast == 0) break; } } return 0; } static int qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) { ha->hw.mcast[i].addr[0] = 0; ha->hw.mcast[i].addr[1] = 0; ha->hw.mcast[i].addr[2] = 0; ha->hw.mcast[i].addr[3] = 0; ha->hw.mcast[i].addr[4] = 0; ha->hw.mcast[i].addr[5] = 0; ha->hw.nmcast--; mta = mta + ETHER_ADDR_LEN; nmcast--; if (nmcast == 0) break; } } return 0; } /* * Name: ql_hw_set_multi * Function: Sets the Multicast Addresses provided by the host O.S into the * hardware (for the given interface) */ int ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt, uint32_t add_mac) { uint8_t *mta = mcast_addr; int i; int ret = 0; uint32_t count = 0; uint8_t *mcast; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); for (i = 0; i < mcnt; i++) { if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) { if (add_mac) { if (qla_hw_mac_addr_present(ha, mta) != 0) { bcopy(mta, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; } } else { if (qla_hw_mac_addr_present(ha, mta) == 0) { bcopy(mta, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; } } } if (count == Q8_MAX_MAC_ADDRS) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } if (add_mac) { qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); } else { qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); } count = 0; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); } mta += Q8_MAC_ADDR_LEN; } if (count) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } if (add_mac) { qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); } else { qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); } } return (ret); } /* * Name: ql_hw_tx_done_locked * Function: Handle Transmit Completions */ void ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) { qla_tx_buf_t *txb; qla_hw_t *hw = &ha->hw; uint32_t comp_idx, comp_count = 0; qla_hw_tx_cntxt_t *hw_tx_cntxt; hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; /* retrieve index of last entry in tx ring completed */ comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons)); while (comp_idx != hw_tx_cntxt->txr_comp) { txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp]; hw_tx_cntxt->txr_comp++; if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS) hw_tx_cntxt->txr_comp = 0; comp_count++; if (txb->m_head) { if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1); bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; } } hw_tx_cntxt->txr_free += comp_count; if (hw_tx_cntxt->txr_free > NUM_TX_DESCRIPTORS) device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d txr_free = %d" "txr_next = %d txr_comp = %d\n", __func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp); QL_ASSERT(ha, (hw_tx_cntxt->txr_free <= NUM_TX_DESCRIPTORS), \ ("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\ __func__, __LINE__, 
txr_idx, hw_tx_cntxt->txr_free, \ hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp)); return; } void ql_update_link_state(qla_host_t *ha) { uint32_t link_state = 0; uint32_t prev_link_state; prev_link_state = ha->hw.link_up; if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) { link_state = READ_REG32(ha, Q8_LINK_STATE); if (ha->pci_func == 0) { link_state = (((link_state & 0xF) == 1)? 1 : 0); } else { link_state = ((((link_state >> 4)& 0xF) == 1)? 1 : 0); } } atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state); if (prev_link_state != ha->hw.link_up) { if (ha->hw.link_up) { if_link_state_change(ha->ifp, LINK_STATE_UP); } else { if_link_state_change(ha->ifp, LINK_STATE_DOWN); } } return; } int ql_hw_check_health(qla_host_t *ha) { uint32_t val; ha->hw.health_count++; if (ha->hw.health_count < 500) return 0; ha->hw.health_count = 0; val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) || (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) { device_printf(ha->pci_dev, "%s: Temperature Alert" " at ts_usecs %ld ts_reg = 0x%08x\n", __func__, qla_get_usec_timestamp(), val); if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE) ha->hw.sp_log_stop = -1; QL_INITIATE_RECOVERY(ha); return -1; } val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT); if ((val != ha->hw.hbeat_value) && (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) { ha->hw.hbeat_value = val; ha->hw.hbeat_failure = 0; return 0; } ha->hw.hbeat_failure++; if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1)) device_printf(ha->pci_dev, "%s: Heartbeat Failure 1[0x%08x]\n", __func__, val); if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */ return 0; else { uint32_t peg_halt_status1; uint32_t peg_halt_status2; peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1); peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2); device_printf(ha->pci_dev, "%s: Heartbeat Failure at ts_usecs = %ld " "fw_heart_beat = 0x%08x " "peg_halt_status1 = 0x%08x " "peg_halt_status2 = 0x%08x\n", __func__, qla_get_usec_timestamp(), val, peg_halt_status1, peg_halt_status2); if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE) ha->hw.sp_log_stop = -1; } QL_INITIATE_RECOVERY(ha); return -1; } static int qla_init_nic_func(qla_host_t *ha) { device_t dev; q80_init_nic_func_t *init_nic; q80_init_nic_func_rsp_t *init_nic_rsp; uint32_t err; dev = ha->pci_dev; init_nic = (q80_init_nic_func_t *)ha->hw.mbox; bzero(init_nic, sizeof(q80_init_nic_func_t)); init_nic->opcode = Q8_MBX_INIT_NIC_FUNC; init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2); init_nic->count_version |= Q8_MBX_CMD_VERSION; init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN; init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN; init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN; //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t)); if (qla_mbx_cmd(ha, (uint32_t *)init_nic, (sizeof (q80_init_nic_func_t) >> 2), ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox; // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t)); err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } else { device_printf(dev, "%s: successful\n", __func__); } return 0; } static int qla_stop_nic_func(qla_host_t *ha) { device_t dev; q80_stop_nic_func_t *stop_nic; q80_stop_nic_func_rsp_t *stop_nic_rsp; uint32_t err; dev = ha->pci_dev; stop_nic =
(q80_stop_nic_func_t *)ha->hw.mbox; bzero(stop_nic, sizeof(q80_stop_nic_func_t)); stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC; stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2); stop_nic->count_version |= Q8_MBX_CMD_VERSION; stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN; stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN; //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t)); if (qla_mbx_cmd(ha, (uint32_t *)stop_nic, (sizeof (q80_stop_nic_func_t) >> 2), ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox; //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t)); err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_query_fw_dcbx_caps(qla_host_t *ha) { device_t dev; q80_query_fw_dcbx_caps_t *fw_dcbx; q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp; uint32_t err; dev = ha->pci_dev; fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox; bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t)); fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS; fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2); fw_dcbx->count_version |= Q8_MBX_CMD_VERSION; ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t)); if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx, (sizeof (q80_query_fw_dcbx_caps_t) >> 2), ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox; ql_dump_buf8(ha, __func__, fw_dcbx_rsp, sizeof (q80_query_fw_dcbx_caps_rsp_t)); err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2, uint32_t aen_mb3, uint32_t aen_mb4) { device_t dev; q80_idc_ack_t *idc_ack; q80_idc_ack_rsp_t *idc_ack_rsp; uint32_t err; int count = 300; dev = ha->pci_dev; idc_ack = (q80_idc_ack_t *)ha->hw.mbox; bzero(idc_ack, sizeof(q80_idc_ack_t)); idc_ack->opcode = Q8_MBX_IDC_ACK; idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2); idc_ack->count_version |= Q8_MBX_CMD_VERSION; idc_ack->aen_mb1 = aen_mb1; idc_ack->aen_mb2 = aen_mb2; idc_ack->aen_mb3 = aen_mb3; idc_ack->aen_mb4 = aen_mb4; ha->hw.imd_compl= 0; if (qla_mbx_cmd(ha, (uint32_t *)idc_ack, (sizeof (q80_idc_ack_t) >> 2), ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } while (count && !ha->hw.imd_compl) { qla_mdelay(__func__, 100); count--; } if (!count) return -1; else device_printf(dev, "%s: count %d\n", __func__, count); return (0); } static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits) { device_t dev; q80_set_port_cfg_t *pcfg; q80_set_port_cfg_rsp_t *pfg_rsp; uint32_t err; int count = 300; dev = ha->pci_dev; pcfg = (q80_set_port_cfg_t *)ha->hw.mbox; bzero(pcfg, sizeof(q80_set_port_cfg_t)); pcfg->opcode = Q8_MBX_SET_PORT_CONFIG; pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2); pcfg->count_version |= Q8_MBX_CMD_VERSION; pcfg->cfg_bits = cfg_bits; device_printf(dev, "%s: cfg_bits" " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" " [0x%x, 0x%x, 0x%x]\n", __func__, ((cfg_bits
& Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)); ha->hw.imd_compl= 0; if (qla_mbx_cmd(ha, (uint32_t *)pcfg, (sizeof (q80_set_port_cfg_t) >> 2), ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status); if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) { while (count && !ha->hw.imd_compl) { qla_mdelay(__func__, 100); count--; } if (count) { device_printf(dev, "%s: count %d\n", __func__, count); err = 0; } } if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } return (0); } static int qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size) { uint32_t err; device_t dev = ha->pci_dev; q80_config_md_templ_size_t *md_size; q80_config_md_templ_size_rsp_t *md_size_rsp; #ifndef QL_LDFLASH_FW ql_minidump_template_hdr_t *hdr; hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump; *size = hdr->size_of_template; return (0); #endif /* #ifdef QL_LDFLASH_FW */ md_size = (q80_config_md_templ_size_t *) ha->hw.mbox; bzero(md_size, sizeof(q80_config_md_templ_size_t)); md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE; md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2); md_size->count_version |= Q8_MBX_CMD_VERSION; if (qla_mbx_cmd(ha, (uint32_t *) md_size, (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox, (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return (-1); } md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox; err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } *size = md_size_rsp->templ_size; return (0); } static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) { device_t dev; q80_get_port_cfg_t *pcfg; q80_get_port_cfg_rsp_t *pcfg_rsp; uint32_t err; dev = ha->pci_dev; pcfg = (q80_get_port_cfg_t *)ha->hw.mbox; bzero(pcfg, sizeof(q80_get_port_cfg_t)); pcfg->opcode = Q8_MBX_GET_PORT_CONFIG; pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2); pcfg->count_version |= Q8_MBX_CMD_VERSION; if (qla_mbx_cmd(ha, (uint32_t *)pcfg, (sizeof (q80_get_port_cfg_t) >> 2), ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } device_printf(dev, "%s: [cfg_bits, port type]" " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" " [0x%x, 0x%x, 0x%x]\n", __func__, pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type, ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0) ); *cfg_bits = pcfg_rsp->cfg_bits; return (0); } int ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) { struct ether_vlan_header *eh; uint16_t etype; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; struct tcphdr *th = NULL; uint32_t hdrlen; uint32_t offset; uint8_t buf[sizeof(struct ip6_hdr)]; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { hdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } if (etype == ETHERTYPE_IP) { offset = (hdrlen + sizeof (struct ip)); if (mp->m_len >= offset) { ip = (struct ip *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, sizeof (struct ip), buf); ip = (struct ip *)buf; } if (ip->ip_p == IPPROTO_TCP) { hdrlen += ip->ip_hl << 2; offset = hdrlen + 4; if (mp->m_len >= offset) { th = (struct tcphdr *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, 4, buf); th = (struct tcphdr *)buf; } } } else if (etype == ETHERTYPE_IPV6) { offset = (hdrlen + sizeof (struct ip6_hdr)); if (mp->m_len >= offset) { ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf); ip6 = (struct ip6_hdr *)buf; } if (ip6->ip6_nxt == IPPROTO_TCP) { hdrlen += sizeof(struct ip6_hdr); offset = hdrlen + 4; if (mp->m_len >= offset) { th = (struct tcphdr *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, 4, buf); th = (struct tcphdr *)buf; } } } if (th != NULL) { if ((th->th_sport == htons(3260)) || (th->th_dport == htons(3260))) return 0; } return (-1); } void qla_hw_async_event(qla_host_t *ha) { switch (ha->hw.aen_mb0) { case 0x8101: (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2, ha->hw.aen_mb3, ha->hw.aen_mb4); break; default: break; } return; } #ifdef QL_LDFLASH_FW static int ql_get_minidump_template(qla_host_t *ha) { uint32_t err; device_t dev = ha->pci_dev; q80_config_md_templ_cmd_t *md_templ; q80_config_md_templ_cmd_rsp_t *md_templ_rsp; md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox; bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t))); md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT; md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2); md_templ->count_version |= Q8_MBX_CMD_VERSION; md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr; md_templ->buff_size = ha->hw.dma_buf.minidump.size; if (qla_mbx_cmd(ha, (uint32_t *) md_templ, (sizeof(q80_config_md_templ_cmd_t) >> 2), ha->hw.mbox, (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return (-1); } md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox; err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return (-1); } return (0); } #endif /* #ifdef QL_LDFLASH_FW */ /* * Minidump related functionality */ static int ql_parse_template(qla_host_t *ha); static uint32_t ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry, uint32_t * data_buff); static uint32_t ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry, uint32_t * data_buff); static uint32_t ql_pollrd_modify_write(qla_host_t *ha, ql_minidump_entry_rd_modify_wr_with_poll_t *entry, uint32_t *data_buff); static uint32_t ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t * data_buff); static uint32_t ql_L1Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t *data_buff); static uint32_t ql_rdocm(qla_host_t *ha, ql_minidump_entry_rdocm_t *ocmEntry, uint32_t *data_buff); 
static uint32_t ql_rdmem(qla_host_t *ha, ql_minidump_entry_rdmem_t *mem_entry, uint32_t *data_buff); static uint32_t ql_rdrom(qla_host_t *ha, ql_minidump_entry_rdrom_t *romEntry, uint32_t *data_buff); static uint32_t ql_rdmux(qla_host_t *ha, ql_minidump_entry_mux_t *muxEntry, uint32_t *data_buff); static uint32_t ql_rdmux2(qla_host_t *ha, ql_minidump_entry_mux2_t *muxEntry, uint32_t *data_buff); static uint32_t ql_rdqueue(qla_host_t *ha, ql_minidump_entry_queue_t *queueEntry, uint32_t *data_buff); static uint32_t ql_cntrl(qla_host_t *ha, ql_minidump_template_hdr_t *template_hdr, ql_minidump_entry_cntrl_t *crbEntry); static uint32_t ql_minidump_size(qla_host_t *ha) { uint32_t i, k; uint32_t size = 0; ql_minidump_template_hdr_t *hdr; hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b; i = 0x2; for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) { if (i & ha->hw.mdump_capture_mask) size += hdr->capture_size_array[k]; i = i << 1; } return (size); } static void ql_free_minidump_buffer(qla_host_t *ha) { if (ha->hw.mdump_buffer != NULL) { free(ha->hw.mdump_buffer, M_QLA83XXBUF); ha->hw.mdump_buffer = NULL; ha->hw.mdump_buffer_size = 0; } return; } static int ql_alloc_minidump_buffer(qla_host_t *ha) { ha->hw.mdump_buffer_size = ql_minidump_size(ha); if (!ha->hw.mdump_buffer_size) return (-1); ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF, M_NOWAIT); if (ha->hw.mdump_buffer == NULL) return (-1); return (0); } static void ql_free_minidump_template_buffer(qla_host_t *ha) { if (ha->hw.mdump_template != NULL) { free(ha->hw.mdump_template, M_QLA83XXBUF); ha->hw.mdump_template = NULL; ha->hw.mdump_template_size = 0; } return; } static int ql_alloc_minidump_template_buffer(qla_host_t *ha) { ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size; ha->hw.mdump_template = malloc(ha->hw.mdump_template_size, M_QLA83XXBUF, M_NOWAIT); if (ha->hw.mdump_template == NULL) return (-1); return (0); } static int ql_alloc_minidump_buffers(qla_host_t *ha) { int ret; ret = ql_alloc_minidump_template_buffer(ha); if (ret) return (ret); ret = ql_alloc_minidump_buffer(ha); if (ret) ql_free_minidump_template_buffer(ha); return (ret); } static uint32_t ql_validate_minidump_checksum(qla_host_t *ha) { uint64_t sum = 0; int count; uint32_t *template_buff; count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t); template_buff = ha->hw.dma_buf.minidump.dma_b; while (count-- > 0) { sum += *template_buff++; } while (sum >> 32) { sum = (sum & 0xFFFFFFFF) + (sum >> 32); } return (~sum); } int ql_minidump_init(qla_host_t *ha) { int ret = 0; uint32_t template_size = 0; device_t dev = ha->pci_dev; /* * Get Minidump Template Size */ ret = qla_get_minidump_tmplt_size(ha, &template_size); if (ret || (template_size == 0)) { device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret, template_size); return (-1); } /* * Allocate Memory for Minidump Template */ ha->hw.dma_buf.minidump.alignment = 8; ha->hw.dma_buf.minidump.size = template_size; #ifdef QL_LDFLASH_FW if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) { device_printf(dev, "%s: minidump dma alloc failed\n", __func__); return (-1); } ha->hw.dma_buf.flags.minidump = 1; /* * Retrieve Minidump Template */ ret = ql_get_minidump_template(ha); #else ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump; #endif /* #ifdef QL_LDFLASH_FW */ if (ret == 0) { ret = ql_validate_minidump_checksum(ha); if (ret == 0) { ret = ql_alloc_minidump_buffers(ha); if (ret == 0) ha->hw.mdump_init = 1; else device_printf(dev, "%s: ql_alloc_minidump_buffers" " failed\n", __func__); 
} else { device_printf(dev, "%s: ql_validate_minidump_checksum" " failed\n", __func__); } } else { device_printf(dev, "%s: ql_get_minidump_template failed\n", __func__); } if (ret) ql_minidump_free(ha); return (ret); } static void ql_minidump_free(qla_host_t *ha) { ha->hw.mdump_init = 0; if (ha->hw.dma_buf.flags.minidump) { ha->hw.dma_buf.flags.minidump = 0; ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump); } ql_free_minidump_template_buffer(ha); ql_free_minidump_buffer(ha); return; } void ql_minidump(qla_host_t *ha) { if (!ha->hw.mdump_init) return; if (ha->hw.mdump_done) return; ha->hw.mdump_usec_ts = qla_get_usec_timestamp(); ha->hw.mdump_start_seq_index = ql_stop_sequence(ha); bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size); bzero(ha->hw.mdump_template, ha->hw.mdump_template_size); bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template, ha->hw.mdump_template_size); ql_parse_template(ha); ql_start_sequence(ha, ha->hw.mdump_start_seq_index); ha->hw.mdump_done = 1; return; } /* * helper routines */ static void ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize) { if (esize != entry->hdr.entry_capture_size) { entry->hdr.entry_capture_size = esize; entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG; } return; } static int ql_parse_template(qla_host_t *ha) { uint32_t num_of_entries, buff_level, e_cnt, esize; uint32_t rv = 0; char *dump_buff, *dbuff; int sane_start = 0, sane_end = 0; ql_minidump_template_hdr_t *template_hdr; ql_minidump_entry_t *entry; uint32_t capture_mask; uint32_t dump_size; /* Setup parameters */ template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template; if (template_hdr->entry_type == TLHDR) sane_start = 1; dump_buff = (char *) ha->hw.mdump_buffer; num_of_entries = template_hdr->num_of_entries; entry = (ql_minidump_entry_t *) ((char *)template_hdr + template_hdr->first_entry_offset ); template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] = template_hdr->ocm_window_array[ha->pci_func]; template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func; capture_mask = ha->hw.mdump_capture_mask; dump_size = ha->hw.mdump_buffer_size; template_hdr->driver_capture_mask = capture_mask; QL_DPRINT80(ha, (ha->pci_dev, "%s: sane_start = %d num_of_entries = %d " "capture_mask = 0x%x dump_size = %d \n", __func__, sane_start, num_of_entries, capture_mask, dump_size)); for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) { /* * If the capture_mask of the entry does not match capture mask * skip the entry after marking the driver_flags indicator. */ if (!(entry->hdr.entry_capture_mask & capture_mask)) { entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; entry = (ql_minidump_entry_t *) ((char *) entry + entry->hdr.entry_size); continue; } /* * This is ONLY needed in implementations where * the capture buffer allocated is too small to capture * all of the required entries for a given capture mask. * We need to empty the buffer contents to a file * if possible, before processing the next entry * If the buff_full_flag is set, no further capture will happen * and all remaining non-control entries will be skipped. 
*/ if (entry->hdr.entry_capture_size != 0) { if ((buff_level + entry->hdr.entry_capture_size) > dump_size) { /* Try to recover by emptying buffer to file */ entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; entry = (ql_minidump_entry_t *) ((char *) entry + entry->hdr.entry_size); continue; } } /* * Decode the entry type and process it accordingly */ switch (entry->hdr.entry_type) { case RDNOP: break; case RDEND: sane_end++; break; case RDCRB: dbuff = dump_buff + buff_level; esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case POLLRD: dbuff = dump_buff + buff_level; esize = ql_pollrd(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case POLLRDMWR: dbuff = dump_buff + buff_level; esize = ql_pollrd_modify_write(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case L2ITG: case L2DTG: case L2DAT: case L2INS: dbuff = dump_buff + buff_level; esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff); if (esize == -1) { entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; } else { ql_entry_err_chk(entry, esize); buff_level += esize; } break; case L1DAT: case L1INS: dbuff = dump_buff + buff_level; esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case RDOCM: dbuff = dump_buff + buff_level; esize = ql_rdocm(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case RDMEM: dbuff = dump_buff + buff_level; esize = ql_rdmem(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case BOARD: case RDROM: dbuff = dump_buff + buff_level; esize = ql_rdrom(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case RDMUX: dbuff = dump_buff + buff_level; esize = ql_rdmux(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case RDMUX2: dbuff = dump_buff + buff_level; esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case QUEUE: dbuff = dump_buff + buff_level; esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff); ql_entry_err_chk(entry, esize); buff_level += esize; break; case CNTRL: if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) { entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; } break; default: entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; break; } /* next entry in the template */ entry = (ql_minidump_entry_t *) ((char *) entry + entry->hdr.entry_size); } if (!sane_start || (sane_end > 1)) { device_printf(ha->pci_dev, "\n%s: Template configuration error. Check Template\n", __func__); } QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n", __func__, template_hdr->num_of_entries)); return 0; } /* * Read CRB operation. */ static uint32_t ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry, uint32_t * data_buff) { int loop_cnt; int ret; uint32_t op_count, addr, stride, value = 0; addr = crb_entry->addr; op_count = crb_entry->op_count; stride = crb_entry->addr_stride; for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { ret = ql_rdwr_indreg32(ha, addr, &value, 1); if (ret) return (0); *data_buff++ = addr; *data_buff++ = value; addr = addr + stride; } /* * for testing purpose we return amount of data written */ return (op_count * (2 * sizeof(uint32_t))); } /* * Handle L2 Cache. 
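* For each of op_count iterations the tag register is written, the control register is optionally written and then polled until the poll_mask bits clear (a poll timeout aborts the capture), and read_addr_cnt words are read starting at read_addr with the configured stride.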
*/ static uint32_t ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t * data_buff) { int i, k; int loop_cnt; int ret; uint32_t read_value; uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w; uint32_t tag_value, read_cnt; volatile uint8_t cntl_value_r; long timeout; uint32_t data; loop_cnt = cacheEntry->op_count; read_addr = cacheEntry->read_addr; cntrl_addr = cacheEntry->control_addr; cntl_value_w = (uint32_t) cacheEntry->write_value; tag_reg_addr = cacheEntry->tag_reg_addr; tag_value = cacheEntry->init_tag_value; read_cnt = cacheEntry->read_addr_cnt; for (i = 0; i < loop_cnt; i++) { ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0); if (ret) return (0); if (cacheEntry->write_value != 0) { ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0); if (ret) return (0); } if (cacheEntry->poll_mask != 0) { timeout = cacheEntry->poll_wait; ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1); if (ret) return (0); cntl_value_r = (uint8_t)data; while ((cntl_value_r & cacheEntry->poll_mask) != 0) { if (timeout) { qla_mdelay(__func__, 1); timeout--; } else break; ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1); if (ret) return (0); cntl_value_r = (uint8_t)data; } if (!timeout) { /* Report timeout error. * core dump capture failed * Skip remaining entries. * Write buffer out to file * Use driver specific fields in template header * to report this error. */ return (-1); } } addr = read_addr; for (k = 0; k < read_cnt; k++) { ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); *data_buff++ = read_value; addr += cacheEntry->read_addr_stride; } tag_value += cacheEntry->tag_value_stride; } return (read_cnt * loop_cnt * sizeof(uint32_t)); } /* * Handle L1 Cache. */ static uint32_t ql_L1Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t *data_buff) { int ret; int i, k; int loop_cnt; uint32_t read_value; uint32_t addr, read_addr, cntrl_addr, tag_reg_addr; uint32_t tag_value, read_cnt; uint32_t cntl_value_w; loop_cnt = cacheEntry->op_count; read_addr = cacheEntry->read_addr; cntrl_addr = cacheEntry->control_addr; cntl_value_w = (uint32_t) cacheEntry->write_value; tag_reg_addr = cacheEntry->tag_reg_addr; tag_value = cacheEntry->init_tag_value; read_cnt = cacheEntry->read_addr_cnt; for (i = 0; i < loop_cnt; i++) { ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0); if (ret) return (0); addr = read_addr; for (k = 0; k < read_cnt; k++) { ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); *data_buff++ = read_value; addr += cacheEntry->read_addr_stride; } tag_value += cacheEntry->tag_value_stride; } return (read_cnt * loop_cnt * sizeof(uint32_t)); } /* * Reading OCM memory */ static uint32_t ql_rdocm(qla_host_t *ha, ql_minidump_entry_rdocm_t *ocmEntry, uint32_t *data_buff) { int i, loop_cnt; volatile uint32_t addr; volatile uint32_t value; addr = ocmEntry->read_addr; loop_cnt = ocmEntry->op_count; for (i = 0; i < loop_cnt; i++) { value = READ_REG32(ha, addr); *data_buff++ = value; addr += ocmEntry->read_addr_stride; } return (loop_cnt * sizeof(value)); } /* * Read memory */ static uint32_t ql_rdmem(qla_host_t *ha, ql_minidump_entry_rdmem_t *mem_entry, uint32_t *data_buff) { int ret; int i, loop_cnt; volatile uint32_t addr; q80_offchip_mem_val_t val; addr = mem_entry->read_addr; /* size in bytes / 16 */ loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4); for (i = 0; i < loop_cnt; i++) { ret = ql_rdwr_offchip_mem(ha, (addr & 
0x0ffffffff), &val, 1); if (ret) return (0); *data_buff++ = val.data_lo; *data_buff++ = val.data_hi; *data_buff++ = val.data_ulo; *data_buff++ = val.data_uhi; addr += (sizeof(uint32_t) * 4); } return (loop_cnt * (sizeof(uint32_t) * 4)); } /* * Read Rom */ static uint32_t ql_rdrom(qla_host_t *ha, ql_minidump_entry_rdrom_t *romEntry, uint32_t *data_buff) { int ret; int i, loop_cnt; uint32_t addr; uint32_t value; addr = romEntry->read_addr; loop_cnt = romEntry->read_data_size; /* This is size in bytes */ loop_cnt /= sizeof(value); for (i = 0; i < loop_cnt; i++) { ret = ql_rd_flash32(ha, addr, &value); if (ret) return (0); *data_buff++ = value; addr += sizeof(value); } return (loop_cnt * sizeof(value)); } /* * Read MUX data */ static uint32_t ql_rdmux(qla_host_t *ha, ql_minidump_entry_mux_t *muxEntry, uint32_t *data_buff) { int ret; int loop_cnt; uint32_t read_value, sel_value; uint32_t read_addr, select_addr; select_addr = muxEntry->select_addr; sel_value = muxEntry->select_value; read_addr = muxEntry->read_addr; for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) { ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); if (ret) return (0); *data_buff++ = sel_value; *data_buff++ = read_value; sel_value += muxEntry->select_value_stride; } return (loop_cnt * (2 * sizeof(uint32_t))); } static uint32_t ql_rdmux2(qla_host_t *ha, ql_minidump_entry_mux2_t *muxEntry, uint32_t *data_buff) { int ret; int loop_cnt; uint32_t select_addr_1, select_addr_2; uint32_t select_value_1, select_value_2; uint32_t select_value_count, select_value_mask; uint32_t read_addr, read_value; select_addr_1 = muxEntry->select_addr_1; select_addr_2 = muxEntry->select_addr_2; select_value_1 = muxEntry->select_value_1; select_value_2 = muxEntry->select_value_2; select_value_count = muxEntry->select_value_count; select_value_mask = muxEntry->select_value_mask; read_addr = muxEntry->read_addr; for (loop_cnt = 0; loop_cnt < select_value_count; loop_cnt++) { uint32_t temp_sel_val; ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0); if (ret) return (0); temp_sel_val = select_value_1 & select_value_mask; ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); if (ret) return (0); *data_buff++ = temp_sel_val; *data_buff++ = read_value; ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0); if (ret) return (0); temp_sel_val = select_value_2 & select_value_mask; ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); if (ret) return (0); *data_buff++ = temp_sel_val; *data_buff++ = read_value; select_value_1 += muxEntry->select_value_stride; select_value_2 += muxEntry->select_value_stride; } return (loop_cnt * (4 * sizeof(uint32_t))); } /* * Handling Queue State Reads. 
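* Each iteration selects a queue by writing queue_id to select_addr, then reads read_addr_cnt words starting at read_addr with the configured stride; queue_id advances by queue_id_stride per iteration.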
*/ static uint32_t ql_rdqueue(qla_host_t *ha, ql_minidump_entry_queue_t *queueEntry, uint32_t *data_buff) { int ret; int loop_cnt, k; uint32_t read_value; uint32_t read_addr, read_stride, select_addr; uint32_t queue_id, read_cnt; read_cnt = queueEntry->read_addr_cnt; read_stride = queueEntry->read_addr_stride; select_addr = queueEntry->select_addr; for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count; loop_cnt++) { ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0); if (ret) return (0); read_addr = queueEntry->read_addr; for (k = 0; k < read_cnt; k++) { ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); if (ret) return (0); *data_buff++ = read_value; read_addr += read_stride; } queue_id += queueEntry->queue_id_stride; } return (loop_cnt * (read_cnt * sizeof(uint32_t))); } /* * Handling control entries. */ static uint32_t ql_cntrl(qla_host_t *ha, ql_minidump_template_hdr_t *template_hdr, ql_minidump_entry_cntrl_t *crbEntry) { int ret; int count; uint32_t opcode, read_value, addr, entry_addr; long timeout; entry_addr = crbEntry->addr; for (count = 0; count < crbEntry->op_count; count++) { opcode = crbEntry->opcode; if (opcode & QL_DBG_OPCODE_WR) { ret = ql_rdwr_indreg32(ha, entry_addr, &crbEntry->value_1, 0); if (ret) return (0); opcode &= ~QL_DBG_OPCODE_WR; } if (opcode & QL_DBG_OPCODE_RW) { ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); if (ret) return (0); ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); if (ret) return (0); opcode &= ~QL_DBG_OPCODE_RW; } if (opcode & QL_DBG_OPCODE_AND) { ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); if (ret) return (0); read_value &= crbEntry->value_2; opcode &= ~QL_DBG_OPCODE_AND; if (opcode & QL_DBG_OPCODE_OR) { read_value |= crbEntry->value_3; opcode &= ~QL_DBG_OPCODE_OR; } ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); if (ret) return (0); } if (opcode & QL_DBG_OPCODE_OR) { ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); if (ret) return (0); read_value |= crbEntry->value_3; ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); if (ret) return (0); opcode &= ~QL_DBG_OPCODE_OR; } if (opcode & QL_DBG_OPCODE_POLL) { opcode &= ~QL_DBG_OPCODE_POLL; timeout = crbEntry->poll_timeout; addr = entry_addr; ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); while ((read_value & crbEntry->value_2) != crbEntry->value_1) { if (timeout) { qla_mdelay(__func__, 1); timeout--; } else break; ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); } if (!timeout) { /* * Report timeout error. * core dump capture failed * Skip remaining entries. * Write buffer out to file * Use driver specific fields in template header * to report this error. */ return (-1); } } if (opcode & QL_DBG_OPCODE_RDSTATE) { /* * decide which address to use. */ if (crbEntry->state_index_a) { addr = template_hdr->saved_state_array[ crbEntry-> state_index_a]; } else { addr = entry_addr; } ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); if (ret) return (0); template_hdr->saved_state_array[crbEntry->state_index_v] = read_value; opcode &= ~QL_DBG_OPCODE_RDSTATE; } if (opcode & QL_DBG_OPCODE_WRSTATE) { /* * decide which value to use. */ if (crbEntry->state_index_v) { read_value = template_hdr->saved_state_array[ crbEntry->state_index_v]; } else { read_value = crbEntry->value_1; } /* * decide which address to use. 
*/ if (crbEntry->state_index_a) { addr = template_hdr->saved_state_array[ crbEntry-> state_index_a]; } else { addr = entry_addr; } ret = ql_rdwr_indreg32(ha, addr, &read_value, 0); if (ret) return (0); opcode &= ~QL_DBG_OPCODE_WRSTATE; } if (opcode & QL_DBG_OPCODE_MDSTATE) { /* Read value from saved state using index */ read_value = template_hdr->saved_state_array[ crbEntry->state_index_v]; read_value <<= crbEntry->shl; /*Shift left operation */ read_value >>= crbEntry->shr; /*Shift right operation */ if (crbEntry->value_2) { /* check if AND mask is provided */ read_value &= crbEntry->value_2; } read_value |= crbEntry->value_3; /* OR operation */ read_value += crbEntry->value_1; /* increment op */ /* Write value back to state area. */ template_hdr->saved_state_array[crbEntry->state_index_v] = read_value; opcode &= ~QL_DBG_OPCODE_MDSTATE; } entry_addr += crbEntry->addr_stride; } return (0); } /* * Handling rd poll entry. */ static uint32_t ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry, uint32_t *data_buff) { int ret; int loop_cnt; uint32_t op_count, select_addr, select_value_stride, select_value; uint32_t read_addr, poll, mask, data; uint32_t wait_count = 0; select_addr = entry->select_addr; read_addr = entry->read_addr; select_value = entry->select_value; select_value_stride = entry->select_value_stride; op_count = entry->op_count; poll = entry->poll; mask = entry->mask; for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0); if (ret) return (0); wait_count = 0; while (wait_count < poll) { uint32_t temp; ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1); if (ret) return (0); if ( (temp & mask) != 0 ) { break; } wait_count++; } if (wait_count == poll) { device_printf(ha->pci_dev, "%s: Error in processing entry\n", __func__); device_printf(ha->pci_dev, "%s: wait_count <0x%x> poll <0x%x>\n", __func__, wait_count, poll); return 0; } ret = ql_rdwr_indreg32(ha, read_addr, &data, 1); if (ret) return (0); *data_buff++ = select_value; *data_buff++ = data; select_value = select_value + select_value_stride; } /* * for testing purpose we return amount of data written */ return (loop_cnt * (2 * sizeof(uint32_t))); } /* * Handling rd modify write poll entry. 
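* value_1 is written to addr_1 and addr_1 is polled against mask; on success addr_2 is read, masked with modify_mask and written back, value_2 is written to addr_1, addr_1 is polled again, and addr_2 plus the modified data are captured.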
*/ static uint32_t ql_pollrd_modify_write(qla_host_t *ha, ql_minidump_entry_rd_modify_wr_with_poll_t *entry, uint32_t *data_buff) { int ret; uint32_t addr_1, addr_2, value_1, value_2, data; uint32_t poll, mask, modify_mask; uint32_t wait_count = 0; addr_1 = entry->addr_1; addr_2 = entry->addr_2; value_1 = entry->value_1; value_2 = entry->value_2; poll = entry->poll; mask = entry->mask; modify_mask = entry->modify_mask; ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0); if (ret) return (0); wait_count = 0; while (wait_count < poll) { uint32_t temp; ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); if (ret) return (0); if ( (temp & mask) != 0 ) { break; } wait_count++; } if (wait_count == poll) { device_printf(ha->pci_dev, "%s Error in processing entry\n", __func__); } else { ret = ql_rdwr_indreg32(ha, addr_2, &data, 1); if (ret) return (0); data = (data & modify_mask); ret = ql_rdwr_indreg32(ha, addr_2, &data, 0); if (ret) return (0); ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0); if (ret) return (0); /* Poll again */ wait_count = 0; while (wait_count < poll) { uint32_t temp; ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); if (ret) return (0); if ( (temp & mask) != 0 ) { break; } wait_count++; } *data_buff++ = addr_2; *data_buff++ = data; } /* * for testing purpose we return amount of data written */ return (2 * sizeof(uint32_t)); } diff --git a/sys/dev/qlxgbe/ql_isr.c b/sys/dev/qlxgbe/ql_isr.c index c31d5369e0d6..0d1ae164c7ae 100644 --- a/sys/dev/qlxgbe/ql_isr.c +++ b/sys/dev/qlxgbe/ql_isr.c @@ -1,999 +1,1006 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_isr.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
*/ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_ver.h" #include "ql_glbl.h" #include "ql_dbg.h" static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx); static void qla_rcv_error(qla_host_t *ha) { ha->stop_rcv = 1; QL_INITIATE_RECOVERY(ha); } /* * Name: qla_rx_intr * Function: Handles normal ethernet frames received */ static void qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx) { qla_rx_buf_t *rxb; struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL; struct ifnet *ifp = ha->ifp; qla_sds_t *sdsp; struct ether_vlan_header *eh; uint32_t i, rem_len = 0; uint32_t r_idx = 0; qla_rx_ring_t *rx_ring; +#if defined(INET) || defined(INET6) struct lro_ctrl *lro; lro = &ha->hw.sds[sds_idx].lro; +#endif if (ha->hw.num_rds_rings > 1) r_idx = sds_idx; ha->hw.rds[r_idx].count++; sdsp = &ha->hw.sds[sds_idx]; rx_ring = &ha->rx_ring[r_idx]; for (i = 0; i < sgc->num_handles; i++) { rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF]; QL_ASSERT(ha, (rxb != NULL), ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\ sds_idx)); if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) { /* log the error */ device_printf(ha->pci_dev, "%s invalid rxb[%d, %d, 0x%04x]\n", __func__, sds_idx, i, sgc->handle[i]); qla_rcv_error(ha); return; } mp = rxb->m_head; if (i == 0) mpf = mp; QL_ASSERT(ha, (mp != NULL), ("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\ sds_idx)); bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD); rxb->m_head = NULL; rxb->next = sdsp->rxb_free; sdsp->rxb_free = rxb; sdsp->rx_free++; if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) { /* log the error */ device_printf(ha->pci_dev, "%s mp == NULL [%d, %d, 0x%04x]\n", __func__, sds_idx, i, sgc->handle[i]); qla_rcv_error(ha); return; } if (i == 0) { mpl = mpf = mp; mp->m_flags |= M_PKTHDR; mp->m_pkthdr.len = sgc->pkt_length; mp->m_pkthdr.rcvif = ifp; rem_len = mp->m_pkthdr.len; } else { mp->m_flags &= ~M_PKTHDR; mpl->m_next = mp; mpl = mp; rem_len = rem_len - mp->m_len; } } mpl->m_len = rem_len; eh = mtod(mpf, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { uint32_t *data = (uint32_t *)eh; mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag); mpf->m_flags |= M_VLANTAG; *(data + 3) = *(data + 2); *(data + 2) = *(data + 1); *(data + 1) = *data; m_adj(mpf, ETHER_VLAN_ENCAP_LEN); } if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) { mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; mpf->m_pkthdr.csum_data = 0xFFFF; } else { mpf->m_pkthdr.csum_flags = 0; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); mpf->m_pkthdr.flowid = sgc->rss_hash; #if __FreeBSD_version >= 1100000 M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH); #else #if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000) M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE); #else M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE); #endif #endif /* #if __FreeBSD_version >= 1100000 */ +#if defined(INET) || defined(INET6) if (ha->hw.enable_soft_lro) { #if (__FreeBSD_version >= 1100101) tcp_lro_queue_mbuf(lro, mpf); #else if (tcp_lro_rx(lro, mpf, 0)) (*ifp->if_input)(ifp, mpf); #endif /* #if (__FreeBSD_version >= 1100101) */ - } else { + } else +#endif + { (*ifp->if_input)(ifp, mpf); } if (sdsp->rx_free > ha->std_replenish) qla_replenish_normal_rx(ha, sdsp, r_idx); return; } #define QLA_TCP_HDR_SIZE 20 #define QLA_TCP_TS_OPTION_SIZE 12 /* * Name: qla_lro_intr * Function: Handles normal ethernet frames received */ static 
int qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx) { qla_rx_buf_t *rxb; struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL; struct ifnet *ifp = ha->ifp; qla_sds_t *sdsp; struct ether_vlan_header *eh; uint32_t i, rem_len = 0, pkt_length, iplen; struct tcphdr *th; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; uint16_t etype; uint32_t r_idx = 0; qla_rx_ring_t *rx_ring; if (ha->hw.num_rds_rings > 1) r_idx = sds_idx; ha->hw.rds[r_idx].count++; rx_ring = &ha->rx_ring[r_idx]; ha->hw.rds[r_idx].lro_pkt_count++; sdsp = &ha->hw.sds[sds_idx]; pkt_length = sgc->payload_length + sgc->l4_offset; if (sgc->flags & Q8_LRO_COMP_TS) { pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE; } else { pkt_length += QLA_TCP_HDR_SIZE; } ha->hw.rds[r_idx].lro_bytes += pkt_length; for (i = 0; i < sgc->num_handles; i++) { rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF]; QL_ASSERT(ha, (rxb != NULL), ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\ sds_idx)); if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) { /* log the error */ device_printf(ha->pci_dev, "%s invalid rxb[%d, %d, 0x%04x]\n", __func__, sds_idx, i, sgc->handle[i]); qla_rcv_error(ha); return (0); } mp = rxb->m_head; if (i == 0) mpf = mp; QL_ASSERT(ha, (mp != NULL), ("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\ sds_idx)); bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD); rxb->m_head = NULL; rxb->next = sdsp->rxb_free; sdsp->rxb_free = rxb; sdsp->rx_free++; if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) { /* log the error */ device_printf(ha->pci_dev, "%s mp == NULL [%d, %d, 0x%04x]\n", __func__, sds_idx, i, sgc->handle[i]); qla_rcv_error(ha); return (0); } if (i == 0) { mpl = mpf = mp; mp->m_flags |= M_PKTHDR; mp->m_pkthdr.len = pkt_length; mp->m_pkthdr.rcvif = ifp; rem_len = mp->m_pkthdr.len; } else { mp->m_flags &= ~M_PKTHDR; mpl->m_next = mp; mpl = mp; rem_len = rem_len - mp->m_len; } } mpl->m_len = rem_len; th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset); if (sgc->flags & Q8_LRO_COMP_PUSH_BIT) th->th_flags |= TH_PUSH; m_adj(mpf, sgc->l2_offset); eh = mtod(mpf, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { uint32_t *data = (uint32_t *)eh; mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag); mpf->m_flags |= M_VLANTAG; *(data + 3) = *(data + 2); *(data + 2) = *(data + 1); *(data + 1) = *data; m_adj(mpf, ETHER_VLAN_ENCAP_LEN); etype = ntohs(eh->evl_proto); } else { etype = ntohs(eh->evl_encap_proto); } if (etype == ETHERTYPE_IP) { ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN); iplen = (ip->ip_hl << 2) + (th->th_off << 2) + sgc->payload_length; ip->ip_len = htons(iplen); ha->ipv4_lro++; M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4); } else if (etype == ETHERTYPE_IPV6) { ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN); iplen = (th->th_off << 2) + sgc->payload_length; ip6->ip6_plen = htons(iplen); ha->ipv6_lro++; M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6); } else { m_freem(mpf); if (sdsp->rx_free > ha->std_replenish) qla_replenish_normal_rx(ha, sdsp, r_idx); return 0; } mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; mpf->m_pkthdr.csum_data = 0xFFFF; mpf->m_pkthdr.flowid = sgc->rss_hash; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); (*ifp->if_input)(ifp, mpf); if (sdsp->rx_free > ha->std_replenish) qla_replenish_normal_rx(ha, sdsp, r_idx); return (0); } static int qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx, uint32_t dcount, uint16_t *handle, uint16_t *nhandles) { uint32_t i; uint16_t 
num_handles; q80_stat_desc_t *sdesc; uint32_t opcode; *nhandles = 0; dcount--; for (i = 0; i < dcount; i++) { comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1); sdesc = (q80_stat_desc_t *) &ha->hw.sds[sds_idx].sds_ring_base[comp_idx]; opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1])); if (!opcode || QL_ERR_INJECT(ha, INJCT_INV_CONT_OPCODE)) { device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n", __func__, (void *)sdesc->data[0], (void *)sdesc->data[1]); return -1; } num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1])); if (!num_handles) { device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n", __func__, (void *)sdesc->data[0], (void *)sdesc->data[1]); return -1; } if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID)) num_handles = -1; switch (num_handles) { case 1: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); break; case 2: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); break; case 3: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); break; case 4: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0])); break; case 5: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1])); break; case 6: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1])); *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1])); break; case 7: *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0])); *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1])); *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1])); *handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1])); break; default: device_printf(ha->pci_dev, "%s: invalid num handles %p %p\n", __func__, (void *)sdesc->data[0], (void *)sdesc->data[1]); QL_ASSERT(ha, (0),\ ("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n", __func__, "invalid num handles", sds_idx, num_handles, (void *)sdesc->data[0],(void *)sdesc->data[1])); qla_rcv_error(ha); return 0; } *nhandles = *nhandles + num_handles; } return 0; } /* * Name: ql_rcv_isr * Function: Main Interrupt Service Routine */ uint32_t ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count) { device_t dev; qla_hw_t *hw; uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode; volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL; uint32_t ret = 0; qla_sgl_comp_t sgc; uint16_t nhandles; uint32_t sds_replenish_threshold = 0; uint32_t r_idx = 0; qla_sds_t *sdsp; dev = ha->pci_dev; hw = &ha->hw; hw->sds[sds_idx].rcv_active = 1; if (ha->stop_rcv) { hw->sds[sds_idx].rcv_active = 0; return 0; } QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx)); /* * receive interrupts */ comp_idx = hw->sds[sds_idx].sdsr_next; while (count-- && !ha->stop_rcv) { sdesc = (q80_stat_desc_t *) 
&hw->sds[sds_idx].sds_ring_base[comp_idx]; opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1])); if (!opcode) break; switch (opcode) { case Q8_STAT_DESC_OPCODE_RCV_PKT: desc_count = 1; bzero(&sgc, sizeof(qla_sgl_comp_t)); sgc.rcv.pkt_length = Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0])); sgc.rcv.num_handles = 1; sgc.rcv.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0])); sgc.rcv.chksum_status = Q8_STAT_DESC_STATUS((sdesc->data[1])); sgc.rcv.rss_hash = Q8_STAT_DESC_RSS_HASH((sdesc->data[0])); if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) { sgc.rcv.vlan_tag = Q8_STAT_DESC_VLAN_ID((sdesc->data[1])); } qla_rx_intr(ha, &sgc.rcv, sds_idx); break; case Q8_STAT_DESC_OPCODE_SGL_RCV: desc_count = Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1])); if (desc_count > 1) { c_idx = (comp_idx + desc_count -1) & (NUM_STATUS_DESCRIPTORS-1); sdesc0 = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[c_idx]; if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) != Q8_STAT_DESC_OPCODE_CONT) || QL_ERR_INJECT(ha, INJCT_SGL_RCV_INV_DESC_COUNT)) { desc_count = 0; break; } } bzero(&sgc, sizeof(qla_sgl_comp_t)); sgc.rcv.pkt_length = Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\ (sdesc->data[0])); sgc.rcv.chksum_status = Q8_STAT_DESC_STATUS((sdesc->data[1])); sgc.rcv.rss_hash = Q8_STAT_DESC_RSS_HASH((sdesc->data[0])); if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) { sgc.rcv.vlan_tag = Q8_STAT_DESC_VLAN_ID((sdesc->data[1])); } QL_ASSERT(ha, (desc_count <= 2) ,\ ("%s: [sds_idx, data0, data1]="\ "%d, %p, %p]\n", __func__, sds_idx,\ (void *)sdesc->data[0],\ (void *)sdesc->data[1])); sgc.rcv.num_handles = 1; sgc.rcv.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0])); if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count, &sgc.rcv.handle[1], &nhandles)) { device_printf(dev, "%s: [sds_idx, dcount, data0, data1]=" "[%d, %d, 0x%llx, 0x%llx]\n", __func__, sds_idx, desc_count, (long long unsigned int)sdesc->data[0], (long long unsigned int)sdesc->data[1]); desc_count = 0; break; } sgc.rcv.num_handles += nhandles; qla_rx_intr(ha, &sgc.rcv, sds_idx); break; case Q8_STAT_DESC_OPCODE_SGL_LRO: desc_count = Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1])); if (desc_count > 1) { c_idx = (comp_idx + desc_count -1) & (NUM_STATUS_DESCRIPTORS-1); sdesc0 = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[c_idx]; if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) != Q8_STAT_DESC_OPCODE_CONT) || QL_ERR_INJECT(ha, INJCT_SGL_LRO_INV_DESC_COUNT)) { desc_count = 0; break; } } bzero(&sgc, sizeof(qla_sgl_comp_t)); sgc.lro.payload_length = Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0])); sgc.lro.rss_hash = Q8_STAT_DESC_RSS_HASH((sdesc->data[0])); sgc.lro.num_handles = 1; sgc.lro.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0])); if (Q8_SGL_LRO_STAT_TS((sdesc->data[1]))) sgc.lro.flags |= Q8_LRO_COMP_TS; if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1]))) sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT; sgc.lro.l2_offset = Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1])); sgc.lro.l4_offset = Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1])); if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) { sgc.lro.vlan_tag = Q8_STAT_DESC_VLAN_ID((sdesc->data[1])); } QL_ASSERT(ha, (desc_count <= 7) ,\ ("%s: [sds_idx, data0, data1]="\ "[%d, 0x%llx, 0x%llx]\n",\ __func__, sds_idx,\ (long long unsigned int)sdesc->data[0],\ (long long unsigned int)sdesc->data[1])); if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count, &sgc.lro.handle[1], &nhandles)) { device_printf(dev, "%s: [sds_idx, data0, data1]="\ "[%d, 0x%llx, 0x%llx]\n",\ __func__, sds_idx,\ (long long unsigned int)sdesc->data[0],\ (long long unsigned int)sdesc->data[1]); desc_count = 0; 
break; } sgc.lro.num_handles += nhandles; if (qla_lro_intr(ha, &sgc.lro, sds_idx)) { device_printf(dev, "%s: [sds_idx, data0, data1]="\ "[%d, 0x%llx, 0x%llx]\n",\ __func__, sds_idx,\ (long long unsigned int)sdesc->data[0],\ (long long unsigned int)sdesc->data[1]); device_printf(dev, "%s: [comp_idx, c_idx, dcount, nhndls]="\ "[%d, %d, %d, %d]\n",\ __func__, comp_idx, c_idx, desc_count, sgc.lro.num_handles); if (desc_count > 1) { device_printf(dev, "%s: [sds_idx, data0, data1]="\ "[%d, 0x%llx, 0x%llx]\n",\ __func__, sds_idx,\ (long long unsigned int)sdesc0->data[0],\ (long long unsigned int)sdesc0->data[1]); } } break; default: desc_count = 0; device_printf(dev, "%s: default 0x%llx!\n", __func__, (long long unsigned int)sdesc->data[0]); break; } if (desc_count == 0) break; sds_replenish_threshold += desc_count; while (desc_count--) { sdesc->data[0] = 0ULL; sdesc->data[1] = 0ULL; comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1); sdesc = (q80_stat_desc_t *) &hw->sds[sds_idx].sds_ring_base[comp_idx]; } if (sds_replenish_threshold > ha->hw.sds_cidx_thres) { sds_replenish_threshold = 0; if (hw->sds[sds_idx].sdsr_next != comp_idx) { QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\ comp_idx); } hw->sds[sds_idx].sdsr_next = comp_idx; } } +#if defined(INET) || defined(INET6) if (ha->hw.enable_soft_lro) { struct lro_ctrl *lro; lro = &ha->hw.sds[sds_idx].lro; #if (__FreeBSD_version >= 1100101) tcp_lro_flush_all(lro); #else struct lro_entry *queued; while ((!SLIST_EMPTY(&lro->lro_active))) { queued = SLIST_FIRST(&lro->lro_active); SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif /* #if (__FreeBSD_version >= 1100101) */ } +#endif if (ha->stop_rcv) goto ql_rcv_isr_exit; if (hw->sds[sds_idx].sdsr_next != comp_idx) { QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx); hw->sds[sds_idx].sdsr_next = comp_idx; } else { if (ha->hw.num_rds_rings > 1) r_idx = sds_idx; sdsp = &ha->hw.sds[sds_idx]; if (sdsp->rx_free > ha->std_replenish) qla_replenish_normal_rx(ha, sdsp, r_idx); } sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx]; opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1])); if (opcode) ret = -1; ql_rcv_isr_exit: hw->sds[sds_idx].rcv_active = 0; return (ret); } void ql_mbx_isr(void *arg) { qla_host_t *ha; uint32_t data; uint32_t prev_link_state; ha = arg; if (ha == NULL) { printf("%s: arg == NULL\n", __func__); return; } data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); if ((data & 0x3) != 0x1) { WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0); return; } data = READ_REG32(ha, Q8_FW_MBOX0); if ((data & 0xF000) != 0x8000) return; data = data & 0xFFFF; switch (data) { case 0x8001: /* It's an AEN */ ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4)); data = READ_REG32(ha, (Q8_FW_MBOX0 + 8)); ha->hw.cable_length = data & 0xFFFF; data = data >> 16; ha->hw.link_speed = data & 0xFFF; data = READ_REG32(ha, (Q8_FW_MBOX0 + 12)); prev_link_state = ha->hw.link_up; data = (((data & 0xFF) == 0) ? 0 : 1); atomic_store_rel_8(&ha->hw.link_up, (uint8_t)data); device_printf(ha->pci_dev, "%s: AEN[0x8001] data = 0x%08x, prev_link_state = 0x%08x\n", __func__, data, prev_link_state); if (prev_link_state != ha->hw.link_up) { if (ha->hw.link_up) if_link_state_change(ha->ifp, LINK_STATE_UP); else if_link_state_change(ha->ifp, LINK_STATE_DOWN); } ha->hw.module_type = ((data >> 8) & 0xFF); ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1); ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 
0 : 1); data = READ_REG32(ha, (Q8_FW_MBOX0 + 16)); ha->hw.loopback_mode = data & 0x03; ha->hw.link_faults = (data >> 3) & 0xFF; break; case 0x8100: device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data); ha->hw.imd_compl=1; break; case 0x8101: ha->async_event = 1; ha->hw.aen_mb0 = 0x8101; ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4)); ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8)); ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12)); ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16)); device_printf(ha->pci_dev, "%s: AEN[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", __func__, data, ha->hw.aen_mb1, ha->hw.aen_mb2, ha->hw.aen_mb3, ha->hw.aen_mb4); break; case 0x8110: /* for now just dump the registers */ { uint32_t ombx[5]; ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4)); ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8)); ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12)); ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16)); ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20)); device_printf(ha->pci_dev, "%s: " "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", __func__, data, ombx[0], ombx[1], ombx[2], ombx[3], ombx[4]); } break; case 0x8130: /* sfp insertion aen */ device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n", __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4))); break; case 0x8131: /* sfp removal aen */ device_printf(ha->pci_dev, "%s: sfp removed\n", __func__); break; case 0x8140: { uint32_t ombx[3]; ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4)); ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8)); ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12)); device_printf(ha->pci_dev, "%s: " "0x%08x 0x%08x 0x%08x 0x%08x \n", __func__, data, ombx[0], ombx[1], ombx[2]); } break; default: device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data); break; } WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); return; } static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx) { qla_rx_buf_t *rxb; int count = sdsp->rx_free; uint32_t rx_next; qla_rdesc_t *rdesc; /* we can play with this value via a sysctl */ uint32_t replenish_thresh = ha->hw.rds_pidx_thres; rdesc = &ha->hw.rds[r_idx]; rx_next = rdesc->rx_next; while (count--) { rxb = sdsp->rxb_free; if (rxb == NULL) break; sdsp->rxb_free = rxb->next; sdsp->rx_free--; if (ql_get_mbuf(ha, rxb, NULL) == 0) { qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); rdesc->rx_in++; if (rdesc->rx_in == NUM_RX_DESCRIPTORS) rdesc->rx_in = 0; rdesc->rx_next++; if (rdesc->rx_next == NUM_RX_DESCRIPTORS) rdesc->rx_next = 0; } else { device_printf(ha->pci_dev, "%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n", __func__, r_idx, rdesc->rx_in, rxb->handle); rxb->m_head = NULL; rxb->next = sdsp->rxb_free; sdsp->rxb_free = rxb; sdsp->rx_free++; break; } if (replenish_thresh-- == 0) { QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std, rdesc->rx_next); rx_next = rdesc->rx_next; replenish_thresh = ha->hw.rds_pidx_thres; } } if (rx_next != rdesc->rx_next) { QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std, rdesc->rx_next); } } void ql_isr(void *arg) { qla_ivec_t *ivec = arg; qla_host_t *ha ; int idx; qla_hw_t *hw; struct ifnet *ifp; qla_tx_fp_t *fp; ha = ivec->ha; hw = &ha->hw; ifp = ha->ifp; if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings) return; fp = &ha->tx_fp[idx]; hw->sds[idx].intr_count++; if ((fp->fp_taskqueue != NULL) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); return; } diff --git a/sys/dev/qlxgbe/ql_os.c b/sys/dev/qlxgbe/ql_os.c index
2a7a173ece41..62289eb878e2 100644 --- a/sys/dev/qlxgbe/ql_os.c +++ b/sys/dev/qlxgbe/ql_os.c @@ -1,2259 +1,2263 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_os.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_ver.h" #include "ql_glbl.h" #include "ql_dbg.h" #include /* * Some PCI Configuration Space Related Defines */ #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP8030 #define PCI_PRODUCT_QLOGIC_ISP8030 0x8030 #endif #define PCI_QLOGIC_ISP8030 \ ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC) /* * static functions */ static int qla_alloc_parent_dma_tag(qla_host_t *ha); static void qla_free_parent_dma_tag(qla_host_t *ha); static int qla_alloc_xmt_bufs(qla_host_t *ha); static void qla_free_xmt_bufs(qla_host_t *ha); static int qla_alloc_rcv_bufs(qla_host_t *ha); static void qla_free_rcv_bufs(qla_host_t *ha); static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb); static void qla_init_ifnet(device_t dev, qla_host_t *ha); static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS); static void qla_release(qla_host_t *ha); static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void qla_stop(qla_host_t *ha); static void qla_get_peer(qla_host_t *ha); static void qla_error_recovery(void *context, int pending); static void qla_async_event(void *context, int pending); static void qla_stats(void *context, int pending); static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, uint32_t iscsi_pdu); /* * Hooks to the Operating Systems */ static int qla_pci_probe (device_t); static int qla_pci_attach (device_t); static int qla_pci_detach (device_t); static void qla_init(void *arg); static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int qla_media_change(struct ifnet *ifp); static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); static int qla_transmit(struct ifnet *ifp, struct mbuf *mp); static void qla_qflush(struct ifnet *ifp); static int 
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); static int qla_create_fp_taskqueues(qla_host_t *ha); static void qla_destroy_fp_taskqueues(qla_host_t *ha); static void qla_drain_fp_taskqueues(qla_host_t *ha); static device_method_t qla_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qla_pci_probe), DEVMETHOD(device_attach, qla_pci_attach), DEVMETHOD(device_detach, qla_pci_detach), { 0, 0 } }; static driver_t qla_pci_driver = { "ql", qla_pci_methods, sizeof (qla_host_t), }; static devclass_t qla83xx_devclass; DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0); MODULE_DEPEND(qla83xx, pci, 1, 1, 1); MODULE_DEPEND(qla83xx, ether, 1, 1, 1); MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver"); #define QL_STD_REPLENISH_THRES 0 #define QL_JUMBO_REPLENISH_THRES 32 static char dev_str[64]; static char ver_str[64]; /* * Name: qla_pci_probe * Function: Validate the PCI device to be a QLA80XX device */ static int qla_pci_probe(device_t dev) { switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_ISP8030: snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); device_set_desc(dev, dev_str); break; default: return (ENXIO); } if (bootverbose) printf("%s: %s\n ", __func__, dev_str); return (BUS_PROBE_DEFAULT); } static void qla_add_sysctls(qla_host_t *ha) { device_t dev = ha->pci_dev; SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "version", CTLFLAG_RD, ver_str, 0, "Driver Version"); SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fw_version", CTLFLAG_RD, ha->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_get_link_status, "I", "Link Status"); ha->dbg_level = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &ha->dbg_level, ha->dbg_level, "Debug Level"); ha->enable_minidump = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_minidump", CTLFLAG_RW, &ha->enable_minidump, ha->enable_minidump, "Minidump retrival prior to error recovery " "is enabled only when this is set"); ha->enable_driverstate_dump = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW, &ha->enable_driverstate_dump, ha->enable_driverstate_dump, "Driver State retrival prior to error recovery " "is enabled only when this is set"); ha->enable_error_recovery = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_error_recovery", CTLFLAG_RW, &ha->enable_error_recovery, ha->enable_error_recovery, "when set error recovery is enabled on fatal errors " "otherwise the port is turned offline"); ha->ms_delay_after_init = 1000; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ms_delay_after_init", CTLFLAG_RW, &ha->ms_delay_after_init, ha->ms_delay_after_init, "millisecond delay after hw_init"); 
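	/*
	 * Illustrative usage, not part of the original patch: the knobs added
	 * in this function hang off the per-device sysctl tree, so (assuming
	 * the "ql" driver name declared above and unit 0 -- verify the exact
	 * node path on the target system) the standard-ring replenish
	 * threshold added next could be tuned at runtime with something like:
	 *
	 *   sysctl dev.ql.0.std_replenish=32
	 */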
ha->std_replenish = QL_STD_REPLENISH_THRES; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "std_replenish", CTLFLAG_RW, &ha->std_replenish, ha->std_replenish, "Threshold for Replenishing Standard Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ipv4_lro", CTLFLAG_RD, &ha->ipv4_lro, "number of ipv4 lro completions"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ipv6_lro", CTLFLAG_RD, &ha->ipv6_lro, "number of ipv6 lro completions"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "tx_tso_frames", CTLFLAG_RD, &ha->tx_tso_frames, "number of Tx TSO Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "hw_vlan_tx_frames", CTLFLAG_RD, &ha->hw_vlan_tx_frames, "number of Tx VLAN Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "hw_lock_failed", CTLFLAG_RD, &ha->hw_lock_failed, "number of hw_lock failures"); return; } static void qla_watchdog(void *arg) { qla_host_t *ha = arg; struct ifnet *ifp; ifp = ha->ifp; if (ha->qla_watchdog_exit) { ha->qla_watchdog_exited = 1; return; } ha->qla_watchdog_exited = 0; if (!ha->qla_watchdog_pause) { if (!ha->offline && (ql_hw_check_health(ha) || ha->qla_initiate_recovery || (ha->msg_from_peer == QL_PEER_MSG_RESET))) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ql_update_link_state(ha); if (ha->enable_error_recovery) { ha->qla_watchdog_paused = 1; ha->qla_watchdog_pause = 1; ha->err_inject = 0; device_printf(ha->pci_dev, "%s: taskqueue_enqueue(err_task) \n", __func__); taskqueue_enqueue(ha->err_tq, &ha->err_task); } else { if (ifp != NULL) ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ha->offline = 1; } return; } else { if (ha->qla_interface_up) { ha->watchdog_ticks++; if (ha->watchdog_ticks > 1000) ha->watchdog_ticks = 0; if (!ha->watchdog_ticks && QL_RUNNING(ifp)) { taskqueue_enqueue(ha->stats_tq, &ha->stats_task); } if (ha->async_event) { taskqueue_enqueue(ha->async_event_tq, &ha->async_event_task); } } ha->qla_watchdog_paused = 0; } } else { ha->qla_watchdog_paused = 1; } callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); } /* * Name: qla_pci_attach * Function: attaches the device to the operating system */ static int qla_pci_attach(device_t dev) { qla_host_t *ha = NULL; uint32_t rsrc_len; int i; uint32_t num_rcvq = 0; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(ha, 0, sizeof (qla_host_t)); if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) { device_printf(dev, "device is not ISP8030\n"); return (ENXIO); } ha->pci_func = pci_get_function(dev) & 0x1; ha->pci_dev = dev; pci_enable_busmaster(dev); ha->reg_rid = PCIR_BAR(0); ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, RF_ACTIVE); if (ha->pci_reg == NULL) { device_printf(dev, "unable to map any ports\n"); goto qla_pci_attach_err; } rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, ha->reg_rid); mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF); ha->flags.lock_init = 1; qla_add_sysctls(ha); ha->hw.num_sds_rings = MAX_SDS_RINGS; ha->hw.num_rds_rings = MAX_RDS_RINGS; ha->hw.num_tx_rings = NUM_TX_RINGS; ha->reg_rid1 = PCIR_BAR(2); ha->pci_reg1 = 
bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid1, RF_ACTIVE); ha->msix_count = pci_msix_count(dev); if (ha->msix_count < 1 ) { device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, ha->msix_count); goto qla_pci_attach_err; } if (ha->msix_count < (ha->hw.num_sds_rings + 1)) { ha->hw.num_sds_rings = ha->msix_count - 1; } QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, ha->pci_reg1)); /* initialize hardware */ if (ql_init_hw(ha)) { device_printf(dev, "%s: ql_init_hw failed\n", __func__); goto qla_pci_attach_err; } device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__, ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, ha->fw_ver_build); snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d", ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, ha->fw_ver_build); if (qla_get_nic_partition(ha, NULL, &num_rcvq)) { device_printf(dev, "%s: qla_get_nic_partition failed\n", __func__); goto qla_pci_attach_err; } device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n", __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, ha->pci_reg1, num_rcvq); if ((ha->msix_count < 64) || (num_rcvq != 32)) { if (ha->hw.num_sds_rings > 15) { ha->hw.num_sds_rings = 15; } } ha->hw.num_rds_rings = ha->hw.num_sds_rings; ha->hw.num_tx_rings = ha->hw.num_sds_rings; #ifdef QL_ENABLE_ISCSI_TLV ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2; #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ ql_hw_add_sysctls(ha); ha->msix_count = ha->hw.num_sds_rings + 1; if (pci_alloc_msix(dev, &ha->msix_count)) { device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, ha->msix_count); ha->msix_count = 0; goto qla_pci_attach_err; } ha->mbx_irq_rid = 1; ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->mbx_irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->mbx_irq == NULL) { device_printf(dev, "could not allocate mbx interrupt\n"); goto qla_pci_attach_err; } if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, ql_mbx_isr, ha, &ha->mbx_handle)) { device_printf(dev, "could not setup mbx interrupt\n"); goto qla_pci_attach_err; } for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->irq_vec[i].sds_idx = i; ha->irq_vec[i].ha = ha; ha->irq_vec[i].irq_rid = 2 + i; ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->irq_vec[i].irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->irq_vec[i].irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto qla_pci_attach_err; } if (bus_setup_intr(dev, ha->irq_vec[i].irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, ql_isr, &ha->irq_vec[i], &ha->irq_vec[i].handle)) { device_printf(dev, "could not setup interrupt\n"); goto qla_pci_attach_err; } ha->tx_fp[i].ha = ha; ha->tx_fp[i].txr_idx = i; if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) { device_printf(dev, "%s: could not allocate tx_br[%d]\n", __func__, i); goto qla_pci_attach_err; } } if (qla_create_fp_taskqueues(ha) != 0) goto qla_pci_attach_err; printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus, ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count); ql_read_mac_addr(ha); /* allocate parent dma tag */ if (qla_alloc_parent_dma_tag(ha)) { device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n", __func__); goto qla_pci_attach_err; } /* alloc all dma buffers */ if (ql_alloc_dma(ha)) { device_printf(dev, "%s: ql_alloc_dma failed\n", __func__); goto qla_pci_attach_err; } 
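	/*
	 * Remaining attach steps below: locate the peer PCI function (used
	 * for the reset/ack handshake during error recovery), set up the
	 * minidump, driver-state and sp-log capture buffers, create the
	 * ifnet and the ioctl cdev, start the watchdog callout, and create
	 * the error-recovery, async-event and stats taskqueues.
	 */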
qla_get_peer(ha); if (ql_minidump_init(ha) != 0) { device_printf(dev, "%s: ql_minidump_init failed\n", __func__); goto qla_pci_attach_err; } ql_alloc_drvr_state_buffer(ha); ql_alloc_sp_log_buffer(ha); /* create the o.s ethernet interface */ qla_init_ifnet(dev, ha); ha->flags.qla_watchdog_active = 1; ha->qla_watchdog_pause = 0; callout_init(&ha->tx_callout, TRUE); ha->flags.qla_callout_init = 1; /* create ioctl device interface */ if (ql_make_cdev(ha)) { device_printf(dev, "%s: ql_make_cdev failed\n", __func__); goto qla_pci_attach_err; } callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha); ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT, taskqueue_thread_enqueue, &ha->err_tq); taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", device_get_nameunit(ha->pci_dev)); TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha); ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT, taskqueue_thread_enqueue, &ha->async_event_tq); taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq", device_get_nameunit(ha->pci_dev)); TASK_INIT(&ha->stats_task, 0, qla_stats, ha); ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT, taskqueue_thread_enqueue, &ha->stats_tq); taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq", device_get_nameunit(ha->pci_dev)); QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__)); return (0); qla_pci_attach_err: qla_release(ha); if (ha->flags.lock_init) { mtx_destroy(&ha->hw_lock); mtx_destroy(&ha->sp_log_lock); } QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__)); return (ENXIO); } /* * Name: qla_pci_detach * Function: Unhooks the device from the operating system */ static int qla_pci_detach(device_t dev) { qla_host_t *ha = NULL; struct ifnet *ifp; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); ifp = ha->ifp; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; QLA_LOCK(ha, __func__, -1, 0); ha->qla_detach_active = 1; qla_stop(ha); qla_release(ha); QLA_UNLOCK(ha, __func__); if (ha->flags.lock_init) { mtx_destroy(&ha->hw_lock); mtx_destroy(&ha->sp_log_lock); } QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); return (0); } /* * SYSCTL Related Callbacks */ static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; ql_hw_link_status(ha); } return (err); } /* * Name: qla_release * Function: Releases the resources allocated for the device */ static void qla_release(qla_host_t *ha) { device_t dev; int i; dev = ha->pci_dev; if (ha->async_event_tq) { taskqueue_drain_all(ha->async_event_tq); taskqueue_free(ha->async_event_tq); } if (ha->err_tq) { taskqueue_drain_all(ha->err_tq); taskqueue_free(ha->err_tq); } if (ha->stats_tq) { taskqueue_drain_all(ha->stats_tq); taskqueue_free(ha->stats_tq); } ql_del_cdev(ha); if (ha->flags.qla_watchdog_active) { ha->qla_watchdog_exit = 1; while (ha->qla_watchdog_exited == 0) qla_mdelay(__func__, 1); } if (ha->flags.qla_callout_init) callout_stop(&ha->tx_callout); if (ha->ifp != NULL) ether_ifdetach(ha->ifp); ql_free_drvr_state_buffer(ha); ql_free_sp_log_buffer(ha); ql_free_dma(ha); qla_free_parent_dma_tag(ha); if (ha->mbx_handle) (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle); if (ha->mbx_irq) (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid, ha->mbx_irq); for (i = 0; i < 
ha->hw.num_sds_rings; i++) { if (ha->irq_vec[i].handle) { (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, ha->irq_vec[i].handle); } if (ha->irq_vec[i].irq) { (void)bus_release_resource(dev, SYS_RES_IRQ, ha->irq_vec[i].irq_rid, ha->irq_vec[i].irq); } qla_free_tx_br(ha, &ha->tx_fp[i]); } qla_destroy_fp_taskqueues(ha); if (ha->msix_count) pci_release_msi(dev); if (ha->pci_reg) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, ha->pci_reg); if (ha->pci_reg1) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, ha->pci_reg1); return; } /* * DMA Related Functions */ static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { *((bus_addr_t *)arg) = 0; if (error) { printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); return; } *((bus_addr_t *)arg) = segs[0].ds_addr; return; } int ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { int ret = 0; device_t dev; bus_addr_t b_addr; dev = ha->pci_dev; QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); ret = bus_dma_tag_create( ha->parent_tag,/* parent */ dma_buf->alignment, ((bus_size_t)(1ULL << 32)),/* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_buf->size, /* maxsize */ 1, /* nsegments */ dma_buf->size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma_buf->dma_tag); if (ret) { device_printf(dev, "%s: could not create dma tag\n", __func__); goto ql_alloc_dmabuf_exit; } ret = bus_dmamem_alloc(dma_buf->dma_tag, (void **)&dma_buf->dma_b, (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), &dma_buf->dma_map); if (ret) { bus_dma_tag_destroy(dma_buf->dma_tag); device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); goto ql_alloc_dmabuf_exit; } ret = bus_dmamap_load(dma_buf->dma_tag, dma_buf->dma_map, dma_buf->dma_b, dma_buf->size, qla_dmamap_callback, &b_addr, BUS_DMA_NOWAIT); if (ret || !b_addr) { bus_dma_tag_destroy(dma_buf->dma_tag); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); ret = -1; goto ql_alloc_dmabuf_exit; } dma_buf->dma_addr = b_addr; ql_alloc_dmabuf_exit: QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", __func__, ret, (void *)dma_buf->dma_tag, (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, dma_buf->size)); return ret; } void ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); bus_dma_tag_destroy(dma_buf->dma_tag); } static int qla_alloc_parent_dma_tag(qla_host_t *ha) { int ret; device_t dev; dev = ha->pci_dev; /* * Allocate parent DMA Tag */ ret = bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ha->parent_tag); if (ret) { device_printf(dev, "%s: could not create parent dma tag\n", __func__); return (-1); } ha->flags.parent_tag = 1; return (0); } static void qla_free_parent_dma_tag(qla_host_t *ha) { if (ha->flags.parent_tag) { bus_dma_tag_destroy(ha->parent_tag); ha->flags.parent_tag = 0; } } /* * Name: qla_init_ifnet * Function: Creates the Network Device Interface and Registers it with the O.S */ static void qla_init_ifnet(device_t dev, qla_host_t *ha) { struct ifnet *ifp; QL_DPRINT2(ha, (dev, "%s: 
enter\n", __func__)); ifp = ha->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); ifp->if_capabilities = IFCAP_LINKSTATE; ifp->if_mtu = ETHERMTU; ifp->if_init = qla_init; ifp->if_softc = ha; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = qla_ioctl; ifp->if_transmit = qla_transmit; ifp->if_qflush = qla_qflush; IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); IFQ_SET_READY(&ifp->if_snd); ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ether_ifattach(ifp, qla_get_mac_addr(ha)); ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_JUMBO_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTSO | IFCAP_LRO; ifp->if_capenable = ifp->if_capabilities; ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status); ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); return; } static void qla_init_locked(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0); qla_stop(ha); if (qla_alloc_xmt_bufs(ha) != 0) return; qla_confirm_9kb_enable(ha); if (qla_alloc_rcv_bufs(ha) != 0) return; bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN); ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; ha->stop_rcv = 0; if (ql_init_hw_if(ha) == 0) { ifp = ha->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ha->hw_vlan_tx_frames = 0; ha->tx_tso_frames = 0; ha->qla_interface_up = 1; ql_update_link_state(ha); } else { if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE) ha->hw.sp_log_stop = -1; } ha->qla_watchdog_pause = 0; return; } static void qla_init(void *arg) { qla_host_t *ha; ha = (qla_host_t *)arg; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; qla_init_locked(ha); QLA_UNLOCK(ha, __func__); QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); } static u_int qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) { uint8_t *mta = arg; if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) return (0); bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); return (1); } static int qla_set_multi(qla_host_t *ha, uint32_t add_multi) { uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; int mcnt = 0; struct ifnet *ifp = ha->ifp; int ret = 0; mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta); if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP) != 0) return (-1); ql_sp_log(ha, 12, 4, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), add_multi, (uint32_t)mcnt, 0); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (!add_multi) { ret = qla_hw_del_all_mcast(ha); if (ret) device_printf(ha->pci_dev, "%s: qla_hw_del_all_mcast() failed\n", __func__); } if (!ret) ret = ql_hw_set_multi(ha, mta, mcnt, 1); } QLA_UNLOCK(ha, __func__); return (ret); } static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { int ret = 0; struct ifreq *ifr = (struct ifreq *)data; +#ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; +#endif qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; if (ha->offline || ha->qla_initiate_recovery) 
return (ret); switch (cmd) { case SIOCSIFADDR: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", __func__, cmd)); +#ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ifp->if_flags |= IFF_UP; ql_sp_log(ha, 8, 3, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { qla_init_locked(ha); } QLA_UNLOCK(ha, __func__); QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr))); arp_ifinit(ifp, ifa); - } else { - ether_ioctl(ifp, cmd, data); + break; } +#endif + ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", __func__, cmd)); if (ifr->ifr_mtu > QLA_MAX_MTU) { ret = EINVAL; } else { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ifp->if_mtu = ifr->ifr_mtu; ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ql_sp_log(ha, 9, 4, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), ha->max_frame_size, ifp->if_mtu, 0); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { qla_init_locked(ha); } if (ifp->if_mtu > ETHERMTU) ha->std_replenish = QL_JUMBO_REPLENISH_THRES; else ha->std_replenish = QL_STD_REPLENISH_THRES; QLA_UNLOCK(ha, __func__); } break; case SIOCSIFFLAGS: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", __func__, cmd)); ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ql_sp_log(ha, 10, 4, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), ha->if_flags, ifp->if_flags, 0); if (ifp->if_flags & IFF_UP) { ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; qla_init_locked(ha); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ ha->if_flags) & IFF_PROMISC) { ret = ql_set_promisc(ha); } else if ((ifp->if_flags ^ ha->if_flags) & IFF_ALLMULTI) { ret = ql_set_allmulti(ha); } } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) qla_stop(ha); ha->if_flags = ifp->if_flags; } QLA_UNLOCK(ha, __func__); break; case SIOCADDMULTI: QL_DPRINT4(ha, (ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); if (qla_set_multi(ha, 1)) ret = EINVAL; break; case SIOCDELMULTI: QL_DPRINT4(ha, (ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); if (qla_set_multi(ha, 0)) ret = EINVAL; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", __func__, cmd)); ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", __func__, cmd)); if (mask & IFCAP_HWCSUM) ifp->if_capenable ^= IFCAP_HWCSUM; if (mask & IFCAP_TSO4) ifp->if_capenable ^= IFCAP_TSO4; if (mask & IFCAP_TSO6) ifp->if_capenable ^= IFCAP_TSO6; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ql_sp_log(ha, 11, 4, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), mask, ifp->if_capenable, 0); qla_init_locked(ha); QLA_UNLOCK(ha, __func__); } VLAN_CAPABILITIES(ifp); break; } default: QL_DPRINT4(ha, (ha->pci_dev, "%s: 
default (0x%lx)\n", __func__, cmd)); ret = ether_ioctl(ifp, cmd, data); break; } return (ret); } static int qla_media_change(struct ifnet *ifp) { qla_host_t *ha; struct ifmedia *ifm; int ret = 0; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); ifm = &ha->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) ret = EINVAL; QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; ql_update_link_state(ha); if (ha->hw.link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha)); } QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\ (ha->hw.link_up ? "link_up" : "link_down"))); return; } static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, uint32_t iscsi_pdu) { bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; bus_dmamap_t map; int nsegs; int ret = -1; uint32_t tx_idx; struct mbuf *m_head = *m_headp; QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) || (QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))){ QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\ "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\ ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head)); device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d " "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx, ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head); if (m_head) m_freem(m_head); *m_headp = NULL; QL_INITIATE_RECOVERY(ha); return (ret); } map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (ret == EFBIG) { struct mbuf *m; QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, m_head->m_pkthdr.len)); m = m_defrag(m_head, M_NOWAIT); if (m == NULL) { ha->err_tx_defrag++; m_freem(m_head); *m_headp = NULL; device_printf(ha->pci_dev, "%s: m_defrag() = NULL [%d]\n", __func__, ret); return (ENOBUFS); } m_head = m; *m_headp = m_head; if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT))) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } } else if (ret) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet")); bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx, iscsi_pdu))) { ha->tx_ring[txr_idx].count++; if (iscsi_pdu) ha->tx_ring[txr_idx].iscsi_pkt_count++; ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; } else { bus_dmamap_unload(ha->tx_tag, map); if (ret == EINVAL) { if (m_head) m_freem(m_head); *m_headp = NULL; } } QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) { snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); fp->tx_br = 
buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if (fp->tx_br == NULL) { QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " " fp[%d, %d]\n", ha->pci_func, fp->txr_idx)); return (-ENOMEM); } return 0; } static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) { struct mbuf *mp; struct ifnet *ifp = ha->ifp; if (mtx_initialized(&fp->tx_mtx)) { if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { m_freem(mp); } mtx_unlock(&fp->tx_mtx); buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } mtx_destroy(&fp->tx_mtx); } return; } static void qla_fp_taskqueue(void *context, int pending) { qla_tx_fp_t *fp; qla_host_t *ha; struct ifnet *ifp; struct mbuf *mp = NULL; int ret = 0; uint32_t txr_idx; uint32_t iscsi_pdu = 0; uint32_t rx_pkts_left = -1; fp = context; if (fp == NULL) return; ha = (qla_host_t *)fp->ha; ifp = ha->ifp; txr_idx = fp->txr_idx; mtx_lock(&fp->tx_mtx); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) { mtx_unlock(&fp->tx_mtx); goto qla_fp_taskqueue_exit; } while (rx_pkts_left && !ha->stop_rcv && (ifp->if_drv_flags & IFF_DRV_RUNNING) && ha->hw.link_up) { rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64); #ifdef QL_ENABLE_ISCSI_TLV ql_hw_tx_done_locked(ha, fp->txr_idx); ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1))); #else ql_hw_tx_done_locked(ha, fp->txr_idx); #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ mp = drbr_peek(ifp, fp->tx_br); while (mp != NULL) { if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) { #ifdef QL_ENABLE_ISCSI_TLV if (ql_iscsi_pdu(ha, mp) == 0) { txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1); iscsi_pdu = 1; } else { iscsi_pdu = 0; txr_idx = fp->txr_idx; } #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ } ret = qla_send(ha, &mp, txr_idx, iscsi_pdu); if (ret) { if (mp != NULL) drbr_putback(ifp, fp->tx_br, mp); else { drbr_advance(ifp, fp->tx_br); } mtx_unlock(&fp->tx_mtx); goto qla_fp_taskqueue_exit0; } else { drbr_advance(ifp, fp->tx_br); } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, mp); if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || (!ha->hw.link_up)) break; mp = drbr_peek(ifp, fp->tx_br); } } mtx_unlock(&fp->tx_mtx); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto qla_fp_taskqueue_exit; qla_fp_taskqueue_exit0: if (rx_pkts_left || ((mp != NULL) && ret)) { taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); } else { if (!ha->stop_rcv) { QL_ENABLE_INTERRUPTS(ha, fp->txr_idx); } } qla_fp_taskqueue_exit: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); return; } static int qla_create_fp_taskqueues(qla_host_t *ha) { int i; uint8_t tq_name[32]; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; bzero(tq_name, sizeof (tq_name)); snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp); fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, taskqueue_thread_enqueue, &fp->fp_taskqueue); if (fp->fp_taskqueue == NULL) return (-1); taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", tq_name); QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, fp->fp_taskqueue)); } return (0); } static void qla_destroy_fp_taskqueues(qla_host_t *ha) { int i; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; if (fp->fp_taskqueue != NULL) { taskqueue_drain_all(fp->fp_taskqueue); taskqueue_free(fp->fp_taskqueue); fp->fp_taskqueue = NULL; } } return; } static void qla_drain_fp_taskqueues(qla_host_t *ha) { int i; for (i = 0; i < 
ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; if (fp->fp_taskqueue != NULL) { taskqueue_drain_all(fp->fp_taskqueue); } } return; } static int qla_transmit(struct ifnet *ifp, struct mbuf *mp) { qla_host_t *ha = (qla_host_t *)ifp->if_softc; qla_tx_fp_t *fp; int rss_id = 0; int ret = 0; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); #if __FreeBSD_version >= 1100000 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) #else if (mp->m_flags & M_FLOWID) #endif rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) % ha->hw.num_sds_rings; fp = &ha->tx_fp[rss_id]; if (fp->tx_br == NULL) { ret = EINVAL; goto qla_transmit_exit; } if (mp != NULL) { ret = drbr_enqueue(ifp, fp->tx_br, mp); } if (fp->fp_taskqueue != NULL) taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); ret = 0; qla_transmit_exit: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); return ret; } static void qla_qflush(struct ifnet *ifp) { int i; qla_tx_fp_t *fp; struct mbuf *mp; qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); for (i = 0; i < ha->hw.num_sds_rings; i++) { fp = &ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br) { mtx_lock(&fp->tx_mtx); while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { m_freem(mp); } mtx_unlock(&fp->tx_mtx); } } QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); return; } static void qla_stop(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; int i = 0; ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ha->qla_watchdog_pause = 1; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp; fp = &ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); mtx_unlock(&fp->tx_mtx); } } while (!ha->qla_watchdog_paused) qla_mdelay(__func__, 1); ha->qla_interface_up = 0; qla_drain_fp_taskqueues(ha); ql_del_hw_if(ha); qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); return; } /* * Buffer Management Functions for Transmit and Receive Rings */ static int qla_alloc_xmt_bufs(qla_host_t *ha) { int ret = 0; uint32_t i, j; qla_tx_buf_t *txb; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ QLA_MAX_SEGMENTS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->tx_tag)) { device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", __func__); return (ENOMEM); } for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero((void *)ha->tx_ring[i].tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); } for (j = 0; j < ha->hw.num_tx_rings; j++) { for (i = 0; i < NUM_TX_DESCRIPTORS; i++) { txb = &ha->tx_ring[j].tx_buf[i]; if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &txb->map))) { ha->err_tx_dmamap_create++; device_printf(ha->pci_dev, "%s: bus_dmamap_create failed[%d]\n", __func__, ret); qla_free_xmt_bufs(ha); return (ret); } } } return 0; } /* * Release mbuf after it sent on the wire */ static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) { QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); if (txb->m_head) { bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; bus_dmamap_destroy(ha->tx_tag, txb->map); txb->map = NULL; } if (txb->map) { bus_dmamap_unload(ha->tx_tag, txb->map); bus_dmamap_destroy(ha->tx_tag, txb->map); txb->map = NULL; } 
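	/*
	 * Either branch above leaves the tx descriptor fully quiesced: the
	 * first handles buffers that still carry an mbuf (sync, unload,
	 * free), the second handles buffers that only had a DMA map created
	 * in qla_alloc_xmt_bufs() but never carried an mbuf.
	 */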
QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); } static void qla_free_xmt_bufs(qla_host_t *ha) { int i, j; for (j = 0; j < ha->hw.num_tx_rings; j++) { for (i = 0; i < NUM_TX_DESCRIPTORS; i++) qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); } if (ha->tx_tag != NULL) { bus_dma_tag_destroy(ha->tx_tag); ha->tx_tag = NULL; } for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero((void *)ha->tx_ring[i].tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); } return; } static int qla_alloc_rcv_std(qla_host_t *ha) { int i, j, k, r, ret = 0; qla_rx_buf_t *rxb; qla_rx_ring_t *rx_ring; for (r = 0; r < ha->hw.num_rds_rings; r++) { rx_ring = &ha->rx_ring[r]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &rx_ring->rx_buf[i]; ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); if (ret) { device_printf(ha->pci_dev, "%s: dmamap[%d, %d] failed\n", __func__, r, i); for (k = 0; k < r; k++) { for (j = 0; j < NUM_RX_DESCRIPTORS; j++) { rxb = &ha->rx_ring[k].rx_buf[j]; bus_dmamap_destroy(ha->rx_tag, rxb->map); } } for (j = 0; j < i; j++) { bus_dmamap_destroy(ha->rx_tag, rx_ring->rx_buf[j].map); } goto qla_alloc_rcv_std_err; } } } qla_init_hw_rcv_descriptors(ha); for (r = 0; r < ha->hw.num_rds_rings; r++) { rx_ring = &ha->rx_ring[r]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &rx_ring->rx_buf[i]; rxb->handle = i; if (!(ret = ql_get_mbuf(ha, rxb, NULL))) { /* * set the physical address in the * corresponding descriptor entry in the * receive ring/queue for the hba */ qla_set_hw_rcv_desc(ha, r, i, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); } else { device_printf(ha->pci_dev, "%s: ql_get_mbuf [%d, %d] failed\n", __func__, r, i); bus_dmamap_destroy(ha->rx_tag, rxb->map); goto qla_alloc_rcv_std_err; } } } return 0; qla_alloc_rcv_std_err: return (-1); } static void qla_free_rcv_std(qla_host_t *ha) { int i, r; qla_rx_buf_t *rxb; for (r = 0; r < ha->hw.num_rds_rings; r++) { for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &ha->rx_ring[r].rx_buf[i]; if (rxb->m_head != NULL) { bus_dmamap_unload(ha->rx_tag, rxb->map); bus_dmamap_destroy(ha->rx_tag, rxb->map); m_freem(rxb->m_head); rxb->m_head = NULL; } } } return; } static int qla_alloc_rcv_bufs(qla_host_t *ha) { int i, ret = 0; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->rx_tag)) { device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", __func__); return (ENOMEM); } bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->hw.sds[i].sdsr_next = 0; ha->hw.sds[i].rxb_free = NULL; ha->hw.sds[i].rx_free = 0; } ret = qla_alloc_rcv_std(ha); return (ret); } static void qla_free_rcv_bufs(qla_host_t *ha) { int i; qla_free_rcv_std(ha); if (ha->rx_tag != NULL) { bus_dma_tag_destroy(ha->rx_tag); ha->rx_tag = NULL; } bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->hw.sds[i].sdsr_next = 0; ha->hw.sds[i].rxb_free = NULL; ha->hw.sds[i].rx_free = 0; } return; } int ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) { register struct mbuf *mp = nmp; int ret = 0; uint32_t offset; bus_dma_segment_t segs[1]; int nsegs, mbuf_size; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); if (ha->hw.enable_9kb) mbuf_size = MJUM9BYTES; else 
mbuf_size = MCLBYTES; if (mp == NULL) { if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE)) return(-1); if (ha->hw.enable_9kb) mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size); else mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mp == NULL) { ha->err_m_getcl++; ret = ENOBUFS; device_printf(ha->pci_dev, "%s: m_getcl failed\n", __func__); goto exit_ql_get_mbuf; } mp->m_len = mp->m_pkthdr.len = mbuf_size; } else { mp->m_len = mp->m_pkthdr.len = mbuf_size; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); if (offset) { offset = 8 - offset; m_adj(mp, offset); } /* * Using memory from the mbuf cluster pool, invoke the bus_dma * machinery to arrange the memory mapping. */ ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, mp, segs, &nsegs, BUS_DMA_NOWAIT); rxb->paddr = segs[0].ds_addr; if (ret || !rxb->paddr || (nsegs != 1)) { m_free(mp); rxb->m_head = NULL; device_printf(ha->pci_dev, "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", __func__, ret, (long long unsigned int)rxb->paddr, nsegs); ret = -1; goto exit_ql_get_mbuf; } rxb->m_head = mp; bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); exit_ql_get_mbuf: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); return (ret); } static void qla_get_peer(qla_host_t *ha) { device_t *peers; int count, i, slot; int my_slot = pci_get_slot(ha->pci_dev); if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count)) return; for (i = 0; i < count; i++) { slot = pci_get_slot(peers[i]); if ((slot >= 0) && (slot == my_slot) && (pci_get_device(peers[i]) == pci_get_device(ha->pci_dev))) { if (ha->pci_dev != peers[i]) ha->peer_dev = peers[i]; } } } static void qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer) { qla_host_t *ha_peer; if (ha->peer_dev) { if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) { ha_peer->msg_from_peer = msg_to_peer; } } } void qla_set_error_recovery(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; if (!cold && ha->enable_error_recovery) { if (ifp) ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ha->qla_initiate_recovery = 1; } else ha->offline = 1; return; } static void qla_error_recovery(void *context, int pending) { qla_host_t *ha = context; uint32_t msecs_100 = 400; struct ifnet *ifp = ha->ifp; int i = 0; device_printf(ha->pci_dev, "%s: enter\n", __func__); ha->hw.imd_compl = 1; taskqueue_drain_all(ha->stats_tq); taskqueue_drain_all(ha->async_event_tq); if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n", __func__, qla_get_usec_timestamp()); if (ha->qla_interface_up) { qla_mdelay(__func__, 300); //ifp->if_drv_flags &= ~IFF_DRV_RUNNING; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp; fp = &ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); mtx_unlock(&fp->tx_mtx); } } } qla_drain_fp_taskqueues(ha); if ((ha->pci_func & 0x1) == 0) { if (!ha->msg_from_peer) { qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) qla_mdelay(__func__, 100); } ha->msg_from_peer = 0; if (ha->enable_minidump) ql_minidump(ha); if (ha->enable_driverstate_dump) ql_capture_drvr_state(ha); if (ql_init_hw(ha)) { device_printf(ha->pci_dev, "%s: ts_usecs = %ld exit: ql_init_hw failed\n", __func__, qla_get_usec_timestamp()); ha->offline = 1; goto qla_error_recovery_exit; } if (ha->qla_interface_up) { qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); } if (!QL_ERR_INJECT(ha, 
INJCT_PEER_PORT_FAILURE_ERR_RECOVERY)) qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); } else { if (ha->msg_from_peer == QL_PEER_MSG_RESET) { ha->msg_from_peer = 0; if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY)) qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); } else { qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); } while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) qla_mdelay(__func__, 100); ha->msg_from_peer = 0; if (ha->enable_driverstate_dump) ql_capture_drvr_state(ha); if (msecs_100 == 0) { device_printf(ha->pci_dev, "%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n", __func__, qla_get_usec_timestamp()); ha->offline = 1; goto qla_error_recovery_exit; } if (ql_init_hw(ha)) { device_printf(ha->pci_dev, "%s: ts_usecs = %ld exit: ql_init_hw failed\n", __func__, qla_get_usec_timestamp()); ha->offline = 1; goto qla_error_recovery_exit; } if (ha->qla_interface_up) { qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); } } qla_mdelay(__func__, ha->ms_delay_after_init); *((uint32_t *)&ha->hw.flags) = 0; ha->qla_initiate_recovery = 0; if (ha->qla_interface_up) { if (qla_alloc_xmt_bufs(ha) != 0) { ha->offline = 1; goto qla_error_recovery_exit; } qla_confirm_9kb_enable(ha); if (qla_alloc_rcv_bufs(ha) != 0) { ha->offline = 1; goto qla_error_recovery_exit; } ha->stop_rcv = 0; if (ql_init_hw_if(ha) == 0) { ifp = ha->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ha->qla_watchdog_pause = 0; ql_update_link_state(ha); } else { ha->offline = 1; if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE) ha->hw.sp_log_stop = -1; } } else { ha->qla_watchdog_pause = 0; } qla_error_recovery_exit: if (ha->offline ) { device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n", __func__, qla_get_usec_timestamp()); if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE) ha->hw.sp_log_stop = -1; } QLA_UNLOCK(ha, __func__); if (!ha->offline) callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); device_printf(ha->pci_dev, "%s: ts_usecs = %ld exit\n", __func__, qla_get_usec_timestamp()); return; } static void qla_async_event(void *context, int pending) { qla_host_t *ha = context; if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; if (ha->async_event) { ha->async_event = 0; qla_hw_async_event(ha); } QLA_UNLOCK(ha, __func__); return; } static void qla_stats(void *context, int pending) { qla_host_t *ha; ha = context; ql_get_stats(ha); return; } diff --git a/sys/dev/qlxgbe/ql_os.h b/sys/dev/qlxgbe/ql_os.h index f14c395d9663..fcee6e740828 100644 --- a/sys/dev/qlxgbe/ql_os.h +++ b/sys/dev/qlxgbe/ql_os.h @@ -1,177 +1,179 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: ql_os.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #ifndef _QL_OS_H_ #define _QL_OS_H_ +#include "opt_inet.h" + #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 900044 #error FreeBSD Version not supported - use version >= 900044 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define QLA_USEC_DELAY(usec) DELAY(usec) static __inline int qla_ms_to_hz(int ms) { int qla_hz; struct timeval t; t.tv_sec = ms / 1000; t.tv_usec = (ms % 1000) * 1000; qla_hz = tvtohz(&t); if (qla_hz < 0) qla_hz = 0x7fffffff; if (!qla_hz) qla_hz = 1; return (qla_hz); } static __inline int qla_sec_to_hz(int sec) { struct timeval t; t.tv_sec = sec; t.tv_usec = 0; return (tvtohz(&t)); } static __inline uint64_t qla_get_usec_timestamp(void) { struct timeval tv; microuptime(&tv); return ((uint64_t)(((uint64_t)tv.tv_sec) * 1000000 + tv.tv_usec)); } #define qla_host_to_le16(x) htole16(x) #define qla_host_to_le32(x) htole32(x) #define qla_host_to_le64(x) htole64(x) #define qla_host_to_be16(x) htobe16(x) #define qla_host_to_be32(x) htobe32(x) #define qla_host_to_be64(x) htobe64(x) #define qla_le16_to_host(x) le16toh(x) #define qla_le32_to_host(x) le32toh(x) #define qla_le64_to_host(x) le64toh(x) #define qla_be16_to_host(x) be16toh(x) #define qla_be32_to_host(x) be32toh(x) #define qla_be64_to_host(x) be64toh(x) MALLOC_DECLARE(M_QLA83XXBUF); #define qla_mdelay(fn, msecs) \ {\ if (cold) \ DELAY((msecs * 1000)); \ else \ pause(fn, qla_ms_to_hz(msecs)); \ } /* * Locks */ #define QLA_LOCK(ha, str, to_ms, no_sleep) qla_lock(ha, str, to_ms, no_sleep) #define QLA_UNLOCK(ha, str) qla_unlock(ha, str) /* * structure encapsulating a DMA buffer */ struct qla_dma { bus_size_t alignment; uint32_t size; void *dma_b; bus_addr_t dma_addr; bus_dmamap_t dma_map; bus_dma_tag_t dma_tag; }; typedef struct qla_dma qla_dma_t; #endif /* #ifndef _QL_OS_H_ */ diff --git a/sys/dev/qlxge/qls_hw.c b/sys/dev/qlxge/qls_hw.c index 3cb2656a1cd5..ab9449089f10 100644 --- a/sys/dev/qlxge/qls_hw.c +++ b/sys/dev/qlxge/qls_hw.c @@ -1,2398 +1,2408 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2014 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: qls_hw.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. * Content: Contains Hardware dependent functions */ #include __FBSDID("$FreeBSD$"); #include "qls_os.h" #include "qls_hw.h" #include "qls_def.h" #include "qls_inline.h" #include "qls_ver.h" #include "qls_glbl.h" #include "qls_dbg.h" /* * Static Functions */ static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op); static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac); static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac, uint32_t index); static int qls_init_rss(qla_host_t *ha); static int qls_init_comp_queue(qla_host_t *ha, int cid); static int qls_init_work_queue(qla_host_t *ha, int wid); static int qls_init_fw_routing_table(qla_host_t *ha); static int qls_hw_add_all_mcast(qla_host_t *ha); static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta); static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta); static int qls_wait_for_flash_ready(qla_host_t *ha); static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value); static void qls_sem_unlock(qla_host_t *ha, uint32_t mask); static void qls_free_tx_dma(qla_host_t *ha); static int qls_alloc_tx_dma(qla_host_t *ha); static void qls_free_rx_dma(qla_host_t *ha); static int qls_alloc_rx_dma(qla_host_t *ha); static void qls_free_mpi_dma(qla_host_t *ha); static int qls_alloc_mpi_dma(qla_host_t *ha); static void qls_free_rss_dma(qla_host_t *ha); static int qls_alloc_rss_dma(qla_host_t *ha); static int qls_flash_validate(qla_host_t *ha, const char *signature); static int qls_wait_for_proc_addr_ready(qla_host_t *ha); static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg, uint32_t *data); static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg, uint32_t data); static int qls_hw_reset(qla_host_t *ha); /* * MPI Related Functions */ static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count, uint32_t *out_mbx, uint32_t o_count); static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl); static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status); static void qls_mbx_get_link_status(qla_host_t *ha); static void qls_mbx_about_fw(qla_host_t *ha); int qls_get_msix_count(qla_host_t *ha) { return (ha->num_rx_rings); } static int qls_syctl_mpi_dump(SYSCTL_HANDLER_ARGS) { int err = 0, ret; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; 
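		/*
		 * Writing 1 to the "mpi_dump" sysctl lands here and captures
		 * an MPI core dump; any other written value is accepted but
		 * ignored.
		 */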
qls_mpi_core_dump(ha); } return (err); } static int qls_syctl_link_status(SYSCTL_HANDLER_ARGS) { int err = 0, ret; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; qls_mbx_get_link_status(ha); qls_mbx_about_fw(ha); } return (err); } void qls_hw_add_sysctls(qla_host_t *ha) { device_t dev; dev = ha->pci_dev; ha->num_rx_rings = MAX_RX_RINGS; ha->num_tx_rings = MAX_TX_RINGS; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings, ha->num_rx_rings, "Number of Completion Queues"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings, ha->num_tx_rings, "Number of Transmit Rings"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "mpi_dump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qls_syctl_mpi_dump, "I", "MPI Dump"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qls_syctl_link_status, "I", "Link Status"); } /* * Name: qls_free_dma * Function: Frees the DMA'able memory allocated in qls_alloc_dma() */ void qls_free_dma(qla_host_t *ha) { qls_free_rss_dma(ha); qls_free_mpi_dma(ha); qls_free_tx_dma(ha); qls_free_rx_dma(ha); return; } /* * Name: qls_alloc_dma * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. */ int qls_alloc_dma(qla_host_t *ha) { if (qls_alloc_rx_dma(ha)) return (-1); if (qls_alloc_tx_dma(ha)) { qls_free_rx_dma(ha); return (-1); } if (qls_alloc_mpi_dma(ha)) { qls_free_tx_dma(ha); qls_free_rx_dma(ha); return (-1); } if (qls_alloc_rss_dma(ha)) { qls_free_mpi_dma(ha); qls_free_tx_dma(ha); qls_free_rx_dma(ha); return (-1); } return (0); } static int qls_wait_for_mac_proto_idx_ready(qla_host_t *ha, uint32_t op) { uint32_t data32; uint32_t count = 3; while (count--) { data32 = READ_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX); if (data32 & op) return (0); QLA_USEC_DELAY(100); } ha->qla_initiate_recovery = 1; return (-1); } /* * Name: qls_config_unicast_mac_addr * Function: binds/unbinds a unicast MAC address to the interface. 
*/ static int qls_config_unicast_mac_addr(qla_host_t *ha, uint32_t add_mac) { int ret = 0; uint32_t mac_upper = 0; uint32_t mac_lower = 0; uint32_t value = 0, index; if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES, Q81_CTL_SEM_SET_MAC_SERDES)) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); return(-1); } if (add_mac) { mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1]; mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) | (ha->mac_addr[4] << 8) | ha->mac_addr[5]; } ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW); if (ret) goto qls_config_unicast_mac_addr_exit; index = 128 * (ha->pci_func & 0x1); /* index */ value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) | Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC; WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value); WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower); ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW); if (ret) goto qls_config_unicast_mac_addr_exit; value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) | Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x1; WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value); WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper); ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW); if (ret) goto qls_config_unicast_mac_addr_exit; value = (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) | Q81_CTL_MAC_PROTO_AI_TYPE_CAM_MAC | 0x2; WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value); value = Q81_CAM_MAC_OFF2_ROUTE_NIC | ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) | (0 << Q81_CAM_MAC_OFF2_CQID_SHIFT); WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, value); qls_config_unicast_mac_addr_exit: qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES); return (ret); } /* * Name: qls_config_mcast_mac_addr * Function: binds/unbinds a multicast MAC address to the interface. */ static int qls_config_mcast_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac, uint32_t index) { int ret = 0; uint32_t mac_upper = 0; uint32_t mac_lower = 0; uint32_t value = 0; if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_MAC_SERDES, Q81_CTL_SEM_SET_MAC_SERDES)) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); return(-1); } if (add_mac) { mac_upper = (mac_addr[0] << 8) | mac_addr[1]; mac_lower = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) | mac_addr[5]; } ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW); if (ret) goto qls_config_mcast_mac_addr_exit; value = Q81_CTL_MAC_PROTO_AI_E | (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) | Q81_CTL_MAC_PROTO_AI_TYPE_MCAST ; WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value); WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_lower); ret = qls_wait_for_mac_proto_idx_ready(ha, Q81_CTL_MAC_PROTO_AI_MW); if (ret) goto qls_config_mcast_mac_addr_exit; value = Q81_CTL_MAC_PROTO_AI_E | (index << Q81_CTL_MAC_PROTO_AI_IDX_SHIFT) | Q81_CTL_MAC_PROTO_AI_TYPE_MCAST | 0x1; WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_INDEX, value); WRITE_REG32(ha, Q81_CTL_MAC_PROTO_ADDR_DATA, mac_upper); qls_config_mcast_mac_addr_exit: qls_sem_unlock(ha, Q81_CTL_SEM_MASK_MAC_SERDES); return (ret); } /* * Name: qls_set_mac_rcv_mode * Function: Enable/Disable AllMulticast and Promiscuous Modes. 
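 * Promiscuous and all-multicast modes are implemented through routing
 * index register entries: qls_set_promisc()/qls_set_allmulti() below
 * load an entry that steers all valid (or all multicast) frames to the
 * default completion queue, and the corresponding reset routines clear
 * that entry.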
*/ static int qls_wait_for_route_idx_ready(qla_host_t *ha, uint32_t op) { uint32_t data32; uint32_t count = 3; while (count--) { data32 = READ_REG32(ha, Q81_CTL_ROUTING_INDEX); if (data32 & op) return (0); QLA_USEC_DELAY(100); } ha->qla_initiate_recovery = 1; return (-1); } static int qls_load_route_idx_reg(qla_host_t *ha, uint32_t index, uint32_t data) { int ret = 0; ret = qls_wait_for_route_idx_ready(ha, Q81_CTL_RI_MW); if (ret) { device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n", __func__, index, data); goto qls_load_route_idx_reg_exit; } WRITE_REG32(ha, Q81_CTL_ROUTING_INDEX, index); WRITE_REG32(ha, Q81_CTL_ROUTING_DATA, data); qls_load_route_idx_reg_exit: return (ret); } static int qls_load_route_idx_reg_locked(qla_host_t *ha, uint32_t index, uint32_t data) { int ret = 0; if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG, Q81_CTL_SEM_SET_RIDX_DATAREG)) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); return(-1); } ret = qls_load_route_idx_reg(ha, index, data); qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG); return (ret); } static int qls_clear_routing_table(qla_host_t *ha) { int i, ret = 0; if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG, Q81_CTL_SEM_SET_RIDX_DATAREG)) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); return(-1); } for (i = 0; i < 16; i++) { ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_TYPE_NICQMASK| (i << 8) | Q81_CTL_RI_DST_DFLTQ), 0); if (ret) break; } qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG); return (ret); } int qls_set_promisc(qla_host_t *ha) { int ret; ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), Q81_CTL_RD_VALID_PKT); return (ret); } void qls_reset_promisc(qla_host_t *ha) { qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_PROMISCUOUS | Q81_CTL_RI_DST_DFLTQ), 0); return; } int qls_set_allmulti(qla_host_t *ha) { int ret; ret = qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_E | Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), Q81_CTL_RD_MCAST); return (ret); } void qls_reset_allmulti(qla_host_t *ha) { qls_load_route_idx_reg_locked(ha, (Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALLMULTI | Q81_CTL_RI_DST_DFLTQ), 0); return; } static int qls_init_fw_routing_table(qla_host_t *ha) { int ret = 0; ret = qls_clear_routing_table(ha); if (ret) return (-1); if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG, Q81_CTL_SEM_SET_RIDX_DATAREG)) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); return(-1); } ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DROP | Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_ALL_ERROR), Q81_CTL_RD_ERROR_PKT); if (ret) goto qls_init_fw_routing_table_exit; ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ | Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_BCAST), Q81_CTL_RD_BCAST); if (ret) goto qls_init_fw_routing_table_exit; if (ha->num_rx_rings > 1 ) { ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_RSS | Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_RSS_MATCH), Q81_CTL_RD_RSS_MATCH); if (ret) goto qls_init_fw_routing_table_exit; } ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ | Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_MCAST_MATCH), Q81_CTL_RD_MCAST_REG_MATCH); if (ret) goto qls_init_fw_routing_table_exit; ret = qls_load_route_idx_reg(ha, (Q81_CTL_RI_E | Q81_CTL_RI_DST_DFLTQ | Q81_CTL_RI_TYPE_NICQMASK | Q81_CTL_RI_IDX_CAM_HIT), Q81_CTL_RD_CAM_HIT); if (ret) goto 
qls_init_fw_routing_table_exit; qls_init_fw_routing_table_exit: qls_sem_unlock(ha, Q81_CTL_SEM_MASK_RIDX_DATAREG); return (ret); } static int qls_tx_tso_chksum(qla_host_t *ha, struct mbuf *mp, q81_tx_tso_t *tx_mac) { +#if defined(INET) || defined(INET6) struct ether_vlan_header *eh; struct ip *ip; +#if defined(INET6) struct ip6_hdr *ip6; +#endif struct tcphdr *th; uint32_t ehdrlen, ip_hlen; int ret = 0; uint16_t etype; uint8_t buf[sizeof(struct ip6_hdr)]; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } switch (etype) { +#ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = sizeof (struct ip); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof(struct ip), buf); ip = (struct ip *)buf; } tx_mac->opcode = Q81_IOCB_TX_TSO; tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ; tx_mac->phdr_offsets = ehdrlen; tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) << Q81_TX_TSO_PHDR_SHIFT); ip->ip_sum = 0; if (mp->m_pkthdr.csum_flags & CSUM_TSO) { tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO; th = (struct tcphdr *)(ip + 1); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); tx_mac->mss = mp->m_pkthdr.tso_segsz; tx_mac->phdr_length = ip_hlen + ehdrlen + (th->th_off << 2); break; } tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ; if (ip->ip_p == IPPROTO_TCP) { tx_mac->flags |= Q81_TX_TSO_FLAGS_TC; } else if (ip->ip_p == IPPROTO_UDP) { tx_mac->flags |= Q81_TX_TSO_FLAGS_UC; } break; +#endif +#ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), buf); ip6 = (struct ip6_hdr *)buf; } tx_mac->opcode = Q81_IOCB_TX_TSO; tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ; tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ; tx_mac->phdr_offsets = ehdrlen; tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) << Q81_TX_TSO_PHDR_SHIFT); if (ip6->ip6_nxt == IPPROTO_TCP) { tx_mac->flags |= Q81_TX_TSO_FLAGS_TC; } else if (ip6->ip6_nxt == IPPROTO_UDP) { tx_mac->flags |= Q81_TX_TSO_FLAGS_UC; } break; +#endif default: ret = -1; break; } return (ret); +#else + return (-1); +#endif } #define QLA_TX_MIN_FREE 2 int qls_hw_tx_done(qla_host_t *ha, uint32_t txr_idx) { uint32_t txr_done, txr_next; txr_done = ha->tx_ring[txr_idx].txr_done; txr_next = ha->tx_ring[txr_idx].txr_next; if (txr_done == txr_next) { ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS; } else if (txr_done > txr_next) { ha->tx_ring[txr_idx].txr_free = txr_done - txr_next; } else { ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS + txr_done - txr_next; } if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE) return (-1); return (0); } /* * Name: qls_hw_send * Function: Transmits a packet. It first checks if the packet is a * candidate for Large TCP Segment Offload and then for UDP/TCP checksum * offload. If neither of these criteria is met, it is transmitted * as a regular ethernet frame.
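 * Up to MAX_TX_MAC_DESC segments are embedded directly in the transmit
 * IOCB; longer scatter/gather lists are written into the per-descriptor
 * OAL buffer (a separate list of segment addresses) and referenced
 * indirectly through a single continuation entry.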
*/ int qls_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, uint32_t txr_next, struct mbuf *mp, uint32_t txr_idx) { q81_tx_mac_t *tx_mac; q81_txb_desc_t *tx_desc; uint32_t total_length = 0; uint32_t i; device_t dev; int ret = 0; dev = ha->pci_dev; total_length = mp->m_pkthdr.len; if (total_length > QLA_MAX_TSO_FRAME_SIZE) { device_printf(dev, "%s: total length exceeds maxlen(%d)\n", __func__, total_length); return (-1); } if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) { if (qls_hw_tx_done(ha, txr_idx)) { device_printf(dev, "%s: tx_free[%d] = %d\n", __func__, txr_idx, ha->tx_ring[txr_idx].txr_free); return (-1); } } tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next]; bzero(tx_mac, sizeof(q81_tx_mac_t)); if ((mp->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO)) != 0) { ret = qls_tx_tso_chksum(ha, mp, (q81_tx_tso_t *)tx_mac); if (ret) return (EINVAL); if (mp->m_pkthdr.csum_flags & CSUM_TSO) ha->tx_ring[txr_idx].tx_tso_frames++; else ha->tx_ring[txr_idx].tx_frames++; } else { tx_mac->opcode = Q81_IOCB_TX_MAC; } if (mp->m_flags & M_VLANTAG) { tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag; tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V; ha->tx_ring[txr_idx].tx_vlan_frames++; } tx_mac->frame_length = total_length; tx_mac->tid_lo = txr_next; if (nsegs <= MAX_TX_MAC_DESC) { QL_DPRINT2((dev, "%s: 1 [%d, %d]\n", __func__, total_length, tx_mac->tid_lo)); for (i = 0; i < nsegs; i++) { tx_mac->txd[i].baddr = segs->ds_addr; tx_mac->txd[i].length = segs->ds_len; segs++; } tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E; } else { QL_DPRINT2((dev, "%s: 2 [%d, %d]\n", __func__, total_length, tx_mac->tid_lo)); tx_mac->txd[0].baddr = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr; tx_mac->txd[0].length = nsegs * (sizeof(q81_txb_desc_t)); tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C; tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr; for (i = 0; i < nsegs; i++) { tx_desc->baddr = segs->ds_addr; tx_desc->length = segs->ds_len; if (i == (nsegs -1)) tx_desc->flags = Q81_RXB_DESC_FLAGS_E; else tx_desc->flags = 0; segs++; tx_desc++; } } txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); ha->tx_ring[txr_idx].txr_next = txr_next; ha->tx_ring[txr_idx].txr_free--; Q81_WR_WQ_PROD_IDX(txr_idx, txr_next); return (0); } /* * Name: qls_del_hw_if * Function: Destroys the hardware specific entities corresponding to an * Ethernet Interface */ void qls_del_hw_if(qla_host_t *ha) { uint32_t value; int i; //int count; if (ha->hw_init == 0) { qls_hw_reset(ha); return; } for (i = 0; i < ha->num_tx_rings; i++) { Q81_SET_WQ_INVALID(i); } for (i = 0; i < ha->num_rx_rings; i++) { Q81_SET_CQ_INVALID(i); } for (i = 0; i < ha->num_rx_rings; i++) { Q81_DISABLE_INTR(ha, i); /* MSI-x i */ } value = (Q81_CTL_INTRE_IHD << Q81_CTL_INTRE_MASK_SHIFT); WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value); value = (Q81_CTL_INTRE_EI << Q81_CTL_INTRE_MASK_SHIFT); WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value); ha->flags.intr_enable = 0; qls_hw_reset(ha); return; } /* * Name: qls_init_hw_if * Function: Creates the hardware specific entities corresponding to an * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address * corresponding to the interface. Enables LRO if allowed. 
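 * The initialization order is: reset the function, program the system
 * and NIC receive configuration registers, load the completion queue
 * ICBs (plus the RSS ICB when multiple rx rings are enabled), load the
 * work queue ICBs, program the CAM and multicast entries, build the
 * firmware routing table, and finally enable interrupts and write the
 * initial ring indices.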
*/ int qls_init_hw_if(qla_host_t *ha) { uint32_t value; int ret = 0; int i; QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__)); ret = qls_hw_reset(ha); if (ret) goto qls_init_hw_if_exit; ha->vm_pgsize = 4096; /* Enable FAE and EFE bits in System Register */ value = Q81_CTL_SYSTEM_ENABLE_FAE | Q81_CTL_SYSTEM_ENABLE_EFE; value = (value << Q81_CTL_SYSTEM_MASK_SHIFT) | value; WRITE_REG32(ha, Q81_CTL_SYSTEM, value); /* Set Default Completion Queue_ID in NIC Rcv Configuration Register */ value = (Q81_CTL_NIC_RCVC_DCQ_MASK << Q81_CTL_NIC_RCVC_MASK_SHIFT); WRITE_REG32(ha, Q81_CTL_NIC_RCV_CONFIG, value); /* Function Specific Control Register - Set Page Size and Enable NIC */ value = Q81_CTL_FUNC_SPECIFIC_FE | Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_MASK | Q81_CTL_FUNC_SPECIFIC_EPC_O | Q81_CTL_FUNC_SPECIFIC_EPC_I | Q81_CTL_FUNC_SPECIFIC_EC; value = (value << Q81_CTL_FUNC_SPECIFIC_MASK_SHIFT) | Q81_CTL_FUNC_SPECIFIC_FE | Q81_CTL_FUNC_SPECIFIC_VM_PGSIZE_4K | Q81_CTL_FUNC_SPECIFIC_EPC_O | Q81_CTL_FUNC_SPECIFIC_EPC_I | Q81_CTL_FUNC_SPECIFIC_EC; WRITE_REG32(ha, Q81_CTL_FUNC_SPECIFIC, value); /* Interrupt Mask Register */ value = Q81_CTL_INTRM_PI; value = (value << Q81_CTL_INTRM_MASK_SHIFT) | value; WRITE_REG32(ha, Q81_CTL_INTR_MASK, value); /* Initialize Completion Queue */ for (i = 0; i < ha->num_rx_rings; i++) { ret = qls_init_comp_queue(ha, i); if (ret) goto qls_init_hw_if_exit; } if (ha->num_rx_rings > 1 ) { ret = qls_init_rss(ha); if (ret) goto qls_init_hw_if_exit; } /* Initialize Work Queue */ for (i = 0; i < ha->num_tx_rings; i++) { ret = qls_init_work_queue(ha, i); if (ret) goto qls_init_hw_if_exit; } if (ret) goto qls_init_hw_if_exit; /* Set up CAM RAM with MAC Address */ ret = qls_config_unicast_mac_addr(ha, 1); if (ret) goto qls_init_hw_if_exit; ret = qls_hw_add_all_mcast(ha); if (ret) goto qls_init_hw_if_exit; /* Initialize Firmware Routing Table */ ret = qls_init_fw_routing_table(ha); if (ret) goto qls_init_hw_if_exit; /* Get Chip Revision ID */ ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID); /* Enable Global Interrupt */ value = Q81_CTL_INTRE_EI; value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value; WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value); /* Enable Interrupt Handshake Disable */ value = Q81_CTL_INTRE_IHD; value = (value << Q81_CTL_INTRE_MASK_SHIFT) | value; WRITE_REG32(ha, Q81_CTL_INTR_ENABLE, value); /* Enable Completion Interrupt */ ha->flags.intr_enable = 1; for (i = 0; i < ha->num_rx_rings; i++) { Q81_ENABLE_INTR(ha, i); /* MSI-x i */ } ha->hw_init = 1; qls_mbx_get_link_status(ha); QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__, ha->rx_ring[0].cq_db_offset)); QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__, ha->tx_ring[0].wq_db_offset)); for (i = 0; i < ha->num_rx_rings; i++) { Q81_WR_CQ_CONS_IDX(i, 0); Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in); Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in); QL_DPRINT2((ha->pci_dev, "%s: [wq_idx, cq_idx, lbq_idx, sbq_idx]" "[0x%08x, 0x%08x, 0x%08x, 0x%08x]\n", __func__, Q81_RD_WQ_IDX(i), Q81_RD_CQ_IDX(i), Q81_RD_LBQ_IDX(i), Q81_RD_SBQ_IDX(i))); } for (i = 0; i < ha->num_rx_rings; i++) { Q81_SET_CQ_VALID(i); } qls_init_hw_if_exit: QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__)); return (ret); } static int qls_wait_for_config_reg_bits(qla_host_t *ha, uint32_t bits, uint32_t value) { uint32_t data32; uint32_t count = 3; while (count--) { data32 = READ_REG32(ha, Q81_CTL_CONFIG); if ((data32 & bits) == value) return (0); QLA_USEC_DELAY(100); } ha->qla_initiate_recovery = 1; device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); }
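/*
 * 40-byte RSS hash key: qls_init_rss() below loads the first 16 bytes as
 * the IPv4 hash key and the full 40 bytes as the IPv6 hash key.  The
 * indirection table (hash mask 0x3FF) is filled round-robin with
 * cq_id[i] = i & (num_rx_rings - 1), which assumes a power-of-two number
 * of rx rings.
 */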
static uint8_t q81_hash_key[] = { 0xda, 0x56, 0x5a, 0x6d, 0xc2, 0x0e, 0x5b, 0x25, 0x3d, 0x25, 0x67, 0x41, 0xb0, 0x8f, 0xa3, 0x43, 0xcb, 0x2b, 0xca, 0xd0, 0xb4, 0x30, 0x7b, 0xae, 0xa3, 0x2d, 0xcb, 0x77, 0x0c, 0xf2, 0x30, 0x80, 0x3b, 0xb7, 0x42, 0x6a, 0xfa, 0x01, 0xac, 0xbe }; static int qls_init_rss(qla_host_t *ha) { q81_rss_icb_t *rss_icb; int ret = 0; int i; uint32_t value; rss_icb = ha->rss_dma.dma_b; bzero(rss_icb, sizeof (q81_rss_icb_t)); rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K | Q81_RSS_ICB_FLAGS_L6K | Q81_RSS_ICB_FLAGS_LI | Q81_RSS_ICB_FLAGS_LB | Q81_RSS_ICB_FLAGS_LM | Q81_RSS_ICB_FLAGS_RT4 | Q81_RSS_ICB_FLAGS_RT6; rss_icb->mask = 0x3FF; for (i = 0; i < Q81_RSS_ICB_NUM_INDTBL_ENTRIES; i++) { rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1)); } memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40); memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16); ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0); if (ret) goto qls_init_rss_exit; ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB); if (ret) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); goto qls_init_rss_exit; } value = (uint32_t)ha->rss_dma.dma_addr; WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value); value = (uint32_t)(ha->rss_dma.dma_addr >> 32); WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value); qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB); value = (Q81_CTL_CONFIG_LR << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LR; WRITE_REG32(ha, Q81_CTL_CONFIG, value); ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LR, 0); qls_init_rss_exit: return (ret); } static int qls_init_comp_queue(qla_host_t *ha, int cid) { q81_cq_icb_t *cq_icb; qla_rx_ring_t *rxr; int ret = 0; uint32_t value; rxr = &ha->rx_ring[cid]; rxr->cq_db_offset = ha->vm_pgsize * (128 + cid); cq_icb = rxr->cq_icb_vaddr; bzero(cq_icb, sizeof (q81_cq_icb_t)); cq_icb->msix_vector = cid; cq_icb->flags = Q81_CQ_ICB_FLAGS_LC | Q81_CQ_ICB_FLAGS_LI | Q81_CQ_ICB_FLAGS_LL | Q81_CQ_ICB_FLAGS_LS | Q81_CQ_ICB_FLAGS_LV; cq_icb->length_v = NUM_CQ_ENTRIES; cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF); cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF; cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF); cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF; cq_icb->pkt_idelay = 10; cq_icb->idelay = 100; cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF); cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF; cq_icb->lbq_bsize = QLA_LGB_SIZE; cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES; cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF); cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF; cq_icb->sbq_bsize = (uint16_t)ha->msize; cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES; QL_DUMP_CQ(ha); ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0); if (ret) goto qls_init_comp_queue_exit; ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB); if (ret) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); goto qls_init_comp_queue_exit; } value = (uint32_t)rxr->cq_icb_paddr; WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value); value = (uint32_t)(rxr->cq_icb_paddr >> 32); WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value); qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB); value = Q81_CTL_CONFIG_LCQ | Q81_CTL_CONFIG_Q_NUM_MASK; value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LCQ; value |= (cid << Q81_CTL_CONFIG_Q_NUM_SHIFT); WRITE_REG32(ha, Q81_CTL_CONFIG, value); ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LCQ, 0); rxr->cq_next = 0; 
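/*
 * Start with the large/small buffer queues treated as full; the initial
 * producer indices are rounded down to a multiple of 16 because the
 * hardware producer index is only advanced in batches of 16 entries
 * (see qls_replenish_rx()).
 */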
rxr->lbq_next = rxr->lbq_free = 0; rxr->sbq_next = rxr->sbq_free = 0; rxr->rx_free = rxr->rx_next = 0; rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF; rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF; qls_init_comp_queue_exit: return (ret); } static int qls_init_work_queue(qla_host_t *ha, int wid) { q81_wq_icb_t *wq_icb; qla_tx_ring_t *txr; int ret = 0; uint32_t value; txr = &ha->tx_ring[wid]; txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1 + (ha->vm_pgsize * wid)); txr->wq_db_offset = (ha->vm_pgsize * wid); wq_icb = txr->wq_icb_vaddr; bzero(wq_icb, sizeof (q81_wq_icb_t)); wq_icb->length_v = NUM_TX_DESCRIPTORS | Q81_WQ_ICB_VALID; wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI | Q81_WQ_ICB_FLAGS_LB | Q81_WQ_ICB_FLAGS_LC; wq_icb->wqcqid_rss = wid; wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF; wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF; wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF; wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF; ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0); if (ret) goto qls_init_wq_exit; ret = qls_sem_lock(ha, Q81_CTL_SEM_MASK_ICB, Q81_CTL_SEM_SET_ICB); if (ret) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); goto qls_init_wq_exit; } value = (uint32_t)txr->wq_icb_paddr; WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_LO, value); value = (uint32_t)(txr->wq_icb_paddr >> 32); WRITE_REG32(ha, Q81_CTL_ICB_ACCESS_ADDR_HI, value); qls_sem_unlock(ha, Q81_CTL_SEM_MASK_ICB); value = Q81_CTL_CONFIG_LRQ | Q81_CTL_CONFIG_Q_NUM_MASK; value = (value << Q81_CTL_CONFIG_MASK_SHIFT) | Q81_CTL_CONFIG_LRQ; value |= (wid << Q81_CTL_CONFIG_Q_NUM_SHIFT); WRITE_REG32(ha, Q81_CTL_CONFIG, value); ret = qls_wait_for_config_reg_bits(ha, Q81_CTL_CONFIG_LRQ, 0); txr->txr_free = NUM_TX_DESCRIPTORS; txr->txr_next = 0; txr->txr_done = 0; qls_init_wq_exit: return (ret); } static int qls_hw_add_all_mcast(qla_host_t *ha) { int i, nmcast; nmcast = ha->nmcast; for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { if ((ha->mcast[i].addr[0] != 0) || (ha->mcast[i].addr[1] != 0) || (ha->mcast[i].addr[2] != 0) || (ha->mcast[i].addr[3] != 0) || (ha->mcast[i].addr[4] != 0) || (ha->mcast[i].addr[5] != 0)) { if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr, 1, i)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } nmcast--; } } return 0; } static int qls_hw_add_mcast(qla_host_t *ha, uint8_t *mta) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) return 0; /* it has already been added */ } for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if ((ha->mcast[i].addr[0] == 0) && (ha->mcast[i].addr[1] == 0) && (ha->mcast[i].addr[2] == 0) && (ha->mcast[i].addr[3] == 0) && (ha->mcast[i].addr[4] == 0) && (ha->mcast[i].addr[5] == 0)) { if (qls_config_mcast_mac_addr(ha, mta, 1, i)) return (-1); bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN); ha->nmcast++; return 0; } } return 0; } static int qls_hw_del_mcast(qla_host_t *ha, uint8_t *mta) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { if (qls_config_mcast_mac_addr(ha, mta, 0, i)) return (-1); ha->mcast[i].addr[0] = 0; ha->mcast[i].addr[1] = 0; ha->mcast[i].addr[2] = 0; ha->mcast[i].addr[3] = 0; ha->mcast[i].addr[4] = 0; ha->mcast[i].addr[5] = 0; ha->nmcast--; return 0; } } return 0; } /* * Name: qls_hw_set_multi * Function: Sets the Multicast Addresses provided by the host O.S. into the * hardware (for the given interface) */ void qls_hw_set_multi(qla_host_t *ha,
uint8_t *mta, uint32_t mcnt, uint32_t add_mac) { int i; for (i = 0; i < mcnt; i++) { if (add_mac) { if (qls_hw_add_mcast(ha, mta)) break; } else { if (qls_hw_del_mcast(ha, mta)) break; } mta += Q8_MAC_ADDR_LEN; } return; } void qls_update_link_state(qla_host_t *ha) { uint32_t link_state; uint32_t prev_link_state; if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) { ha->link_up = 0; return; } link_state = READ_REG32(ha, Q81_CTL_STATUS); prev_link_state = ha->link_up; if ((ha->pci_func & 0x1) == 0) ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0); else ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0); if (prev_link_state != ha->link_up) { if (ha->link_up) { if_link_state_change(ha->ifp, LINK_STATE_UP); } else { if_link_state_change(ha->ifp, LINK_STATE_DOWN); } } return; } static void qls_free_tx_ring_dma(qla_host_t *ha, int r_idx) { if (ha->tx_ring[r_idx].flags.wq_dma) { qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma); ha->tx_ring[r_idx].flags.wq_dma = 0; } if (ha->tx_ring[r_idx].flags.privb_dma) { qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma); ha->tx_ring[r_idx].flags.privb_dma = 0; } return; } static void qls_free_tx_dma(qla_host_t *ha) { int i, j; qla_tx_buf_t *txb; for (i = 0; i < ha->num_tx_rings; i++) { qls_free_tx_ring_dma(ha, i); for (j = 0; j < NUM_TX_DESCRIPTORS; j++) { txb = &ha->tx_ring[i].tx_buf[j]; if (txb->map) { bus_dmamap_destroy(ha->tx_tag, txb->map); } } } if (ha->tx_tag != NULL) { bus_dma_tag_destroy(ha->tx_tag); ha->tx_tag = NULL; } return; } static int qls_alloc_tx_ring_dma(qla_host_t *ha, int ridx) { int ret = 0, i; uint8_t *v_addr; bus_addr_t p_addr; qla_tx_buf_t *txb; device_t dev = ha->pci_dev; ha->tx_ring[ridx].wq_dma.alignment = 8; ha->tx_ring[ridx].wq_dma.size = NUM_TX_DESCRIPTORS * (sizeof (q81_tx_cmd_t)); ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma); if (ret) { device_printf(dev, "%s: [%d] txr failed\n", __func__, ridx); goto qls_alloc_tx_ring_dma_exit; } ha->tx_ring[ridx].flags.wq_dma = 1; ha->tx_ring[ridx].privb_dma.alignment = 8; ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE; ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma); if (ret) { device_printf(dev, "%s: [%d] oalb failed\n", __func__, ridx); goto qls_alloc_tx_ring_dma_exit; } ha->tx_ring[ridx].flags.privb_dma = 1; ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b; ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr; v_addr = ha->tx_ring[ridx].privb_dma.dma_b; p_addr = ha->tx_ring[ridx].privb_dma.dma_addr; ha->tx_ring[ridx].wq_icb_vaddr = v_addr; ha->tx_ring[ridx].wq_icb_paddr = p_addr; ha->tx_ring[ridx].txr_cons_vaddr = (uint32_t *)(v_addr + (PAGE_SIZE >> 1)); ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1); v_addr = v_addr + (PAGE_SIZE >> 1); p_addr = p_addr + (PAGE_SIZE >> 1); txb = ha->tx_ring[ridx].tx_buf; for (i = 0; i < NUM_TX_DESCRIPTORS; i++) { txb[i].oal_vaddr = v_addr; txb[i].oal_paddr = p_addr; v_addr = v_addr + QLA_OAL_BLK_SIZE; p_addr = p_addr + QLA_OAL_BLK_SIZE; } qls_alloc_tx_ring_dma_exit: return (ret); } static int qls_alloc_tx_dma(qla_host_t *ha) { int i, j; int ret = 0; qla_tx_buf_t *txb; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ QLA_MAX_SEGMENTS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->tx_tag)) { device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", 
__func__); return (ENOMEM); } for (i = 0; i < ha->num_tx_rings; i++) { ret = qls_alloc_tx_ring_dma(ha, i); if (ret) { qls_free_tx_dma(ha); break; } for (j = 0; j < NUM_TX_DESCRIPTORS; j++) { txb = &ha->tx_ring[i].tx_buf[j]; ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &txb->map); if (ret) { ha->err_tx_dmamap_create++; device_printf(ha->pci_dev, "%s: bus_dmamap_create failed[%d, %d, %d]\n", __func__, ret, i, j); qls_free_tx_dma(ha); return (ret); } } } return (ret); } static void qls_free_rss_dma(qla_host_t *ha) { qls_free_dmabuf(ha, &ha->rss_dma); ha->flags.rss_dma = 0; } static int qls_alloc_rss_dma(qla_host_t *ha) { int ret = 0; ha->rss_dma.alignment = 4; ha->rss_dma.size = PAGE_SIZE; ret = qls_alloc_dmabuf(ha, &ha->rss_dma); if (ret) device_printf(ha->pci_dev, "%s: failed\n", __func__); else ha->flags.rss_dma = 1; return (ret); } static void qls_free_mpi_dma(qla_host_t *ha) { qls_free_dmabuf(ha, &ha->mpi_dma); ha->flags.mpi_dma = 0; } static int qls_alloc_mpi_dma(qla_host_t *ha) { int ret = 0; ha->mpi_dma.alignment = 4; ha->mpi_dma.size = (0x4000 * 4); ret = qls_alloc_dmabuf(ha, &ha->mpi_dma); if (ret) device_printf(ha->pci_dev, "%s: failed\n", __func__); else ha->flags.mpi_dma = 1; return (ret); } static void qls_free_rx_ring_dma(qla_host_t *ha, int ridx) { if (ha->rx_ring[ridx].flags.cq_dma) { qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma); ha->rx_ring[ridx].flags.cq_dma = 0; } if (ha->rx_ring[ridx].flags.lbq_dma) { qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma); ha->rx_ring[ridx].flags.lbq_dma = 0; } if (ha->rx_ring[ridx].flags.sbq_dma) { qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma); ha->rx_ring[ridx].flags.sbq_dma = 0; } if (ha->rx_ring[ridx].flags.lb_dma) { qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma); ha->rx_ring[ridx].flags.lb_dma = 0; } return; } static void qls_free_rx_dma(qla_host_t *ha) { int i; for (i = 0; i < ha->num_rx_rings; i++) { qls_free_rx_ring_dma(ha, i); } if (ha->rx_tag != NULL) { bus_dma_tag_destroy(ha->rx_tag); ha->rx_tag = NULL; } return; } static int qls_alloc_rx_ring_dma(qla_host_t *ha, int ridx) { int i, ret = 0; uint8_t *v_addr; bus_addr_t p_addr; volatile q81_bq_addr_e_t *bq_e; device_t dev = ha->pci_dev; ha->rx_ring[ridx].cq_dma.alignment = 128; ha->rx_ring[ridx].cq_dma.size = (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))) + PAGE_SIZE; ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma); if (ret) { device_printf(dev, "%s: [%d] cq failed\n", __func__, ridx); goto qls_alloc_rx_ring_dma_exit; } ha->rx_ring[ridx].flags.cq_dma = 1; ha->rx_ring[ridx].lbq_dma.alignment = 8; ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE; ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma); if (ret) { device_printf(dev, "%s: [%d] lbq failed\n", __func__, ridx); goto qls_alloc_rx_ring_dma_exit; } ha->rx_ring[ridx].flags.lbq_dma = 1; ha->rx_ring[ridx].sbq_dma.alignment = 8; ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE; ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma); if (ret) { device_printf(dev, "%s: [%d] sbq failed\n", __func__, ridx); goto qls_alloc_rx_ring_dma_exit; } ha->rx_ring[ridx].flags.sbq_dma = 1; ha->rx_ring[ridx].lb_dma.alignment = 8; ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES); ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma); if (ret) { device_printf(dev, "%s: [%d] lb failed\n", __func__, ridx); goto qls_alloc_rx_ring_dma_exit; } ha->rx_ring[ridx].flags.lb_dma = 1; bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size); bzero(ha->rx_ring[ridx].lbq_dma.dma_b, 
ha->rx_ring[ridx].lbq_dma.size); bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size); bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size); /* completion queue */ ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b; ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr; v_addr = ha->rx_ring[ridx].cq_dma.dma_b; p_addr = ha->rx_ring[ridx].cq_dma.dma_addr; v_addr = v_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))); p_addr = p_addr + (NUM_CQ_ENTRIES * (sizeof (q81_cq_e_t))); /* completion queue icb */ ha->rx_ring[ridx].cq_icb_vaddr = v_addr; ha->rx_ring[ridx].cq_icb_paddr = p_addr; v_addr = v_addr + (PAGE_SIZE >> 2); p_addr = p_addr + (PAGE_SIZE >> 2); /* completion queue index register */ ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr; ha->rx_ring[ridx].cqi_paddr = p_addr; v_addr = ha->rx_ring[ridx].lbq_dma.dma_b; p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr; /* large buffer queue address table */ ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr; ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr; /* large buffer queue */ ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE; ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE; v_addr = ha->rx_ring[ridx].sbq_dma.dma_b; p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr; /* small buffer queue address table */ ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr; ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr; /* small buffer queue */ ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE; ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE; ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b; ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr; /* Initialize Large Buffer Queue Table */ p_addr = ha->rx_ring[ridx].lbq_paddr; bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr; bq_e->addr_lo = p_addr & 0xFFFFFFFF; bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF; p_addr = ha->rx_ring[ridx].lb_paddr; bq_e = ha->rx_ring[ridx].lbq_vaddr; for (i = 0; i < QLA_NUM_LGB_ENTRIES; i++) { bq_e->addr_lo = p_addr & 0xFFFFFFFF; bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF; p_addr = p_addr + QLA_LGB_SIZE; bq_e++; } /* Initialize Small Buffer Queue Table */ p_addr = ha->rx_ring[ridx].sbq_paddr; bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr; for (i =0; i < (QLA_SBQ_SIZE/QLA_PAGE_SIZE); i++) { bq_e->addr_lo = p_addr & 0xFFFFFFFF; bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF; p_addr = p_addr + QLA_PAGE_SIZE; bq_e++; } qls_alloc_rx_ring_dma_exit: return (ret); } static int qls_alloc_rx_dma(qla_host_t *ha) { int i; int ret = 0; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->rx_tag)) { device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", __func__); return (ENOMEM); } for (i = 0; i < ha->num_rx_rings; i++) { ret = qls_alloc_rx_ring_dma(ha, i); if (ret) { qls_free_rx_dma(ha); break; } } return (ret); } static int qls_wait_for_flash_ready(qla_host_t *ha) { uint32_t data32; uint32_t count = 3; while (count--) { data32 = READ_REG32(ha, Q81_CTL_FLASH_ADDR); if (data32 & Q81_CTL_FLASH_ADDR_ERR) goto qls_wait_for_flash_ready_exit; if (data32 & Q81_CTL_FLASH_ADDR_RDY) return (0); QLA_USEC_DELAY(100); } qls_wait_for_flash_ready_exit: QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__)); return (-1); } /* * Name: qls_rd_flash32 * Function: Read Flash Memory */ int 
qls_rd_flash32(qla_host_t *ha, uint32_t addr, uint32_t *data) { int ret; ret = qls_wait_for_flash_ready(ha); if (ret) return (ret); WRITE_REG32(ha, Q81_CTL_FLASH_ADDR, (addr | Q81_CTL_FLASH_ADDR_R)); ret = qls_wait_for_flash_ready(ha); if (ret) return (ret); *data = READ_REG32(ha, Q81_CTL_FLASH_DATA); return 0; } static int qls_flash_validate(qla_host_t *ha, const char *signature) { uint16_t csum16 = 0; uint16_t *data16; int i; if (bcmp(ha->flash.id, signature, 4)) { QL_DPRINT1((ha->pci_dev, "%s: invalid signature " "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0], ha->flash.id[1], ha->flash.id[2], ha->flash.id[3], signature)); return(-1); } data16 = (uint16_t *)&ha->flash; for (i = 0; i < (sizeof (q81_flash_t) >> 1); i++) { csum16 += *data16++; } if (csum16) { QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__)); return(-1); } return(0); } int qls_rd_nic_params(qla_host_t *ha) { int i, ret = 0; uint32_t faddr; uint32_t *qflash; if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_FLASH, Q81_CTL_SEM_SET_FLASH)) { QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); return(-1); } if ((ha->pci_func & 0x1) == 0) faddr = Q81_F0_FLASH_OFFSET >> 2; else faddr = Q81_F1_FLASH_OFFSET >> 2; qflash = (uint32_t *)&ha->flash; for (i = 0; i < (sizeof(q81_flash_t) >> 2) ; i++) { ret = qls_rd_flash32(ha, faddr, qflash); if (ret) goto qls_rd_flash_data_exit; faddr++; qflash++; } QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t))); ret = qls_flash_validate(ha, Q81_FLASH_ID); if (ret) goto qls_rd_flash_data_exit; bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN); QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2], ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5])); qls_rd_flash_data_exit: qls_sem_unlock(ha, Q81_CTL_SEM_MASK_FLASH); return(ret); } static int qls_sem_lock(qla_host_t *ha, uint32_t mask, uint32_t value) { uint32_t count = 30; uint32_t data; while (count--) { WRITE_REG32(ha, Q81_CTL_SEMAPHORE, (mask|value)); data = READ_REG32(ha, Q81_CTL_SEMAPHORE); if (data & value) { return (0); } else { QLA_USEC_DELAY(100); } } ha->qla_initiate_recovery = 1; return (-1); } static void qls_sem_unlock(qla_host_t *ha, uint32_t mask) { WRITE_REG32(ha, Q81_CTL_SEMAPHORE, mask); } static int qls_wait_for_proc_addr_ready(qla_host_t *ha) { uint32_t data32; uint32_t count = 3; while (count--) { data32 = READ_REG32(ha, Q81_CTL_PROC_ADDR); if (data32 & Q81_CTL_PROC_ADDR_ERR) goto qls_wait_for_proc_addr_ready_exit; if (data32 & Q81_CTL_PROC_ADDR_RDY) return (0); QLA_USEC_DELAY(100); } qls_wait_for_proc_addr_ready_exit: QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__)); ha->qla_initiate_recovery = 1; return (-1); } static int qls_proc_addr_rd_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg, uint32_t *data) { int ret; uint32_t value; ret = qls_wait_for_proc_addr_ready(ha); if (ret) goto qls_proc_addr_rd_reg_exit; value = addr_module | reg | Q81_CTL_PROC_ADDR_READ; WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value); ret = qls_wait_for_proc_addr_ready(ha); if (ret) goto qls_proc_addr_rd_reg_exit; *data = READ_REG32(ha, Q81_CTL_PROC_DATA); qls_proc_addr_rd_reg_exit: return (ret); } static int qls_proc_addr_wr_reg(qla_host_t *ha, uint32_t addr_module, uint32_t reg, uint32_t data) { int ret; uint32_t value; ret = qls_wait_for_proc_addr_ready(ha); if (ret) goto qls_proc_addr_wr_reg_exit; WRITE_REG32(ha, Q81_CTL_PROC_DATA, data); value = addr_module | reg; WRITE_REG32(ha, Q81_CTL_PROC_ADDR, value); ret = 
qls_wait_for_proc_addr_ready(ha); qls_proc_addr_wr_reg_exit: return (ret); } static int qls_hw_nic_reset(qla_host_t *ha) { int count; uint32_t data; device_t dev = ha->pci_dev; ha->hw_init = 0; data = (Q81_CTL_RESET_FUNC << Q81_CTL_RESET_MASK_SHIFT) | Q81_CTL_RESET_FUNC; WRITE_REG32(ha, Q81_CTL_RESET, data); count = 10; while (count--) { data = READ_REG32(ha, Q81_CTL_RESET); if ((data & Q81_CTL_RESET_FUNC) == 0) break; QLA_USEC_DELAY(10); } if (count == 0) { device_printf(dev, "%s: Bit 15 not cleared after Reset\n", __func__); return (-1); } return (0); } static int qls_hw_reset(qla_host_t *ha) { device_t dev = ha->pci_dev; int ret; int count; uint32_t data; QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init)); if (ha->hw_init == 0) { ret = qls_hw_nic_reset(ha); goto qls_hw_reset_exit; } ret = qls_clear_routing_table(ha); if (ret) goto qls_hw_reset_exit; ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_STOP); if (ret) goto qls_hw_reset_exit; /* * Wait for FIFO to empty */ count = 5; while (count--) { data = READ_REG32(ha, Q81_CTL_STATUS); if (data & Q81_CTL_STATUS_NFE) break; qls_mdelay(__func__, 100); } if (count == 0) { device_printf(dev, "%s: NFE bit not set\n", __func__); goto qls_hw_reset_exit; } count = 5; while (count--) { (void)qls_mbx_get_mgmt_ctrl(ha, &data); if ((data & Q81_MBX_GET_MGMT_CTL_FIFO_EMPTY) && (data & Q81_MBX_GET_MGMT_CTL_SET_MGMT)) break; qls_mdelay(__func__, 100); } if (count == 0) goto qls_hw_reset_exit; /* * Reset the NIC function */ ret = qls_hw_nic_reset(ha); if (ret) goto qls_hw_reset_exit; ret = qls_mbx_set_mgmt_ctrl(ha, Q81_MBX_SET_MGMT_CTL_RESUME); qls_hw_reset_exit: if (ret) device_printf(dev, "%s: failed\n", __func__); return (ret); } /* * MPI Related Functions */ int qls_mpi_risc_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data) { int ret; ret = qls_proc_addr_rd_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC, reg, data); return (ret); } int qls_mpi_risc_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data) { int ret; ret = qls_proc_addr_wr_reg(ha, Q81_CTL_PROC_ADDR_MPI_RISC, reg, data); return (ret); } int qls_mbx_rd_reg(qla_host_t *ha, uint32_t reg, uint32_t *data) { int ret; if ((ha->pci_func & 0x1) == 0) reg += Q81_FUNC0_MBX_OUT_REG0; else reg += Q81_FUNC1_MBX_OUT_REG0; ret = qls_mpi_risc_rd_reg(ha, reg, data); return (ret); } int qls_mbx_wr_reg(qla_host_t *ha, uint32_t reg, uint32_t data) { int ret; if ((ha->pci_func & 0x1) == 0) reg += Q81_FUNC0_MBX_IN_REG0; else reg += Q81_FUNC1_MBX_IN_REG0; ret = qls_mpi_risc_wr_reg(ha, reg, data); return (ret); } static int qls_mbx_cmd(qla_host_t *ha, uint32_t *in_mbx, uint32_t i_count, uint32_t *out_mbx, uint32_t o_count) { int i, ret = -1; uint32_t data32; uint32_t count = 50; QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n", __func__, *in_mbx, *(in_mbx + 1), *(in_mbx + 2))); data32 = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS); if (data32 & Q81_CTL_HCS_HTR_INTR) { device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n", __func__, data32); goto qls_mbx_cmd_exit; } if (qls_sem_lock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV, Q81_CTL_SEM_SET_PROC_ADDR_NIC_RCV)) { device_printf(ha->pci_dev, "%s: semlock failed\n", __func__); goto qls_mbx_cmd_exit; } ha->mbx_done = 0; for (i = 0; i < i_count; i++) { ret = qls_mbx_wr_reg(ha, i, *in_mbx); if (ret) { device_printf(ha->pci_dev, "%s: mbx_wr[%d, 0x%08x] failed\n", __func__, i, *in_mbx); qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV); goto qls_mbx_cmd_exit; } in_mbx++; } WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_SET_HTR_INTR); 
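/*
 * Descriptive note (not in the original source): the HTR interrupt set
 * above (presumably "host to RISC") hands the mailbox command to the MPI
 * firmware.  The reply is collected either by polling mailbox out
 * register 0 for a 0x4xxx completion code when interrupts are disabled,
 * or by waiting for the ISR to post the mailbox contents and set
 * ha->mbx_done.
 */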
qls_sem_unlock(ha, Q81_CTL_SEM_MASK_PROC_ADDR_NIC_RCV); ret = -1; ha->mbx_done = 0; while (count--) { if (ha->flags.intr_enable == 0) { data32 = READ_REG32(ha, Q81_CTL_STATUS); if (!(data32 & Q81_CTL_STATUS_PI)) { qls_mdelay(__func__, 100); continue; } ret = qls_mbx_rd_reg(ha, 0, &data32); if (ret == 0 ) { if ((data32 & 0xF000) == 0x4000) { out_mbx[0] = data32; for (i = 1; i < o_count; i++) { ret = qls_mbx_rd_reg(ha, i, &data32); if (ret) { device_printf( ha->pci_dev, "%s: mbx_rd[%d]" " failed\n", __func__, i); break; } out_mbx[i] = data32; } break; } else if ((data32 & 0xF000) == 0x8000) { count = 50; WRITE_REG32(ha,\ Q81_CTL_HOST_CMD_STATUS,\ Q81_CTL_HCS_CMD_CLR_RTH_INTR); } } } else { if (ha->mbx_done) { for (i = 1; i < o_count; i++) { out_mbx[i] = ha->mbox[i]; } ret = 0; break; } } qls_mdelay(__func__, 1000); } qls_mbx_cmd_exit: if (ha->flags.intr_enable == 0) { WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\ Q81_CTL_HCS_CMD_CLR_RTH_INTR); } if (ret) { ha->qla_initiate_recovery = 1; } QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret)); return (ret); } static int qls_mbx_set_mgmt_ctrl(qla_host_t *ha, uint32_t t_ctrl) { uint32_t *mbox; device_t dev = ha->pci_dev; mbox = ha->mbox; bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); mbox[0] = Q81_MBX_SET_MGMT_CTL; mbox[1] = t_ctrl; if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) { device_printf(dev, "%s failed\n", __func__); return (-1); } if ((mbox[0] == Q81_MBX_CMD_COMPLETE) || ((t_ctrl == Q81_MBX_SET_MGMT_CTL_STOP) && (mbox[0] == Q81_MBX_CMD_ERROR))){ return (0); } device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]); return (-1); } static int qls_mbx_get_mgmt_ctrl(qla_host_t *ha, uint32_t *t_status) { uint32_t *mbox; device_t dev = ha->pci_dev; *t_status = 0; mbox = ha->mbox; bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); mbox[0] = Q81_MBX_GET_MGMT_CTL; if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) { device_printf(dev, "%s failed\n", __func__); return (-1); } *t_status = mbox[1]; return (0); } static void qls_mbx_get_link_status(qla_host_t *ha) { uint32_t *mbox; device_t dev = ha->pci_dev; mbox = ha->mbox; bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); mbox[0] = Q81_MBX_GET_LNK_STATUS; if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) { device_printf(dev, "%s failed\n", __func__); return; } ha->link_status = mbox[1]; ha->link_down_info = mbox[2]; ha->link_hw_info = mbox[3]; ha->link_dcbx_counters = mbox[4]; ha->link_change_counters = mbox[5]; device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]); return; } static void qls_mbx_about_fw(qla_host_t *ha) { uint32_t *mbox; device_t dev = ha->pci_dev; mbox = ha->mbox; bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); mbox[0] = Q81_MBX_ABOUT_FW; if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) { device_printf(dev, "%s failed\n", __func__); return; } device_printf(dev, "%s 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]); } int qls_mbx_dump_risc_ram(qla_host_t *ha, void *buf, uint32_t r_addr, uint32_t r_size) { bus_addr_t b_paddr; uint32_t *mbox; device_t dev = ha->pci_dev; mbox = ha->mbox; bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); bzero(ha->mpi_dma.dma_b,(r_size << 2)); b_paddr = ha->mpi_dma.dma_addr; mbox[0] = Q81_MBX_DUMP_RISC_RAM; mbox[1] = r_addr & 0xFFFF; mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF; mbox[3] = ((uint32_t)b_paddr) & 0xFFFF; mbox[4] = (r_size >> 16) & 0xFFFF; mbox[5] = r_size & 0xFFFF; mbox[6] = ((uint32_t)(b_paddr >> 
48)) & 0xFFFF; mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF; mbox[8] = (r_addr >> 16) & 0xFFFF; bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map, BUS_DMASYNC_PREREAD); if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) { device_printf(dev, "%s failed\n", __func__); return (-1); } if (mbox[0] != 0x4000) { device_printf(ha->pci_dev, "%s: failed!\n", __func__); return (-1); } else { bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map, BUS_DMASYNC_POSTREAD); bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2)); } return (0); } int qls_mpi_reset(qla_host_t *ha) { int count; uint32_t data; device_t dev = ha->pci_dev; WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\ Q81_CTL_HCS_CMD_SET_RISC_RESET); count = 10; while (count--) { data = READ_REG32(ha, Q81_CTL_HOST_CMD_STATUS); if (data & Q81_CTL_HCS_RISC_RESET) { WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS,\ Q81_CTL_HCS_CMD_CLR_RISC_RESET); break; } qls_mdelay(__func__, 10); } if (count == 0) { device_printf(dev, "%s: failed\n", __func__); return (-1); } return (0); } diff --git a/sys/dev/qlxge/qls_isr.c b/sys/dev/qlxge/qls_isr.c index e77aab97c9a1..38da2d2c3408 100644 --- a/sys/dev/qlxge/qls_isr.c +++ b/sys/dev/qlxge/qls_isr.c @@ -1,386 +1,396 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2014 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: qls_isr.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
*/ #include __FBSDID("$FreeBSD$"); #include "qls_os.h" #include "qls_hw.h" #include "qls_def.h" #include "qls_inline.h" #include "qls_ver.h" #include "qls_glbl.h" #include "qls_dbg.h" static void qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp) { qla_tx_buf_t *txb; uint32_t tx_idx = tx_comp->tid_lo; if (tx_idx >= NUM_TX_DESCRIPTORS) { ha->qla_initiate_recovery = 1; return; } txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx]; if (txb->m_head) { if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1); bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; } ha->tx_ring[txr_idx].txr_done++; if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS) ha->tx_ring[txr_idx].txr_done = 0; } static void qls_replenish_rx(qla_host_t *ha, uint32_t r_idx) { qla_rx_buf_t *rxb; qla_rx_ring_t *rxr; int count; volatile q81_bq_addr_e_t *sbq_e; rxr = &ha->rx_ring[r_idx]; count = rxr->rx_free; sbq_e = rxr->sbq_vaddr; while (count--) { rxb = &rxr->rx_buf[rxr->sbq_next]; if (rxb->m_head == NULL) { if (qls_get_mbuf(ha, rxb, NULL) != 0) { device_printf(ha->pci_dev, "%s: qls_get_mbuf [0,%d,%d] failed\n", __func__, rxr->sbq_next, r_idx); rxb->m_head = NULL; break; } } if (rxb->m_head != NULL) { sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr; sbq_e[rxr->sbq_next].addr_hi = (uint32_t)(rxb->paddr >> 32); rxr->sbq_next++; if (rxr->sbq_next == NUM_RX_DESCRIPTORS) rxr->sbq_next = 0; rxr->sbq_free++; rxr->rx_free--; } if (rxr->sbq_free == 16) { rxr->sbq_in += 16; rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1); rxr->sbq_free = 0; Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in)); } } } static int qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e) { qla_rx_buf_t *rxb; qla_rx_ring_t *rxr; device_t dev = ha->pci_dev; struct mbuf *mp = NULL; struct ifnet *ifp = ha->ifp; +#if defined(INET) || defined(INET6) struct lro_ctrl *lro; +#endif struct ether_vlan_header *eh; rxr = &ha->rx_ring[rxr_idx]; +#if defined(INET) || defined(INET6) lro = &rxr->lro; +#endif rxb = &rxr->rx_buf[rxr->rx_next]; if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) { device_printf(dev, "%s: DS bit not set \n", __func__); return -1; } if (rxb->paddr != cq_e->b_paddr) { device_printf(dev, "%s: (rxb->paddr != cq_e->b_paddr)[%p, %p] \n", __func__, (void *)rxb->paddr, (void *)cq_e->b_paddr); Q81_SET_CQ_INVALID(cq_idx); ha->qla_initiate_recovery = 1; return(-1); } rxr->rx_int++; if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) { mp = rxb->m_head; rxb->m_head = NULL; if (mp == NULL) { device_printf(dev, "%s: mp == NULL\n", __func__); } else { mp->m_flags |= M_PKTHDR; mp->m_pkthdr.len = cq_e->length; mp->m_pkthdr.rcvif = ifp; mp->m_len = cq_e->length; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { uint32_t *data = (uint32_t *)eh; mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag); mp->m_flags |= M_VLANTAG; *(data + 3) = *(data + 2); *(data + 2) = *(data + 1); *(data + 1) = *data; m_adj(mp, ETHER_VLAN_ENCAP_LEN); } if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) { rxr->rss_int++; mp->m_pkthdr.flowid = cq_e->rss; M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE_HASH); } if (cq_e->flags0 & (Q81_RX_FLAGS0_TE | Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) { mp->m_pkthdr.csum_flags = 0; } else { mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; mp->m_pkthdr.csum_data = 0xFFFF; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); +#if defined(INET) || defined(INET6) if (lro->lro_cnt && 
(tcp_lro_rx(lro, mp, 0) == 0)) { /* LRO packet has been successfully queued */ - } else { + } else +#endif + { (*ifp->if_input)(ifp, mp); } } } else { device_printf(dev, "%s: err [0%08x]\n", __func__, cq_e->flags1); } rxr->rx_free++; rxr->rx_next++; if (rxr->rx_next == NUM_RX_DESCRIPTORS) rxr->rx_next = 0; if ((rxr->rx_free + rxr->sbq_free) >= 16) qls_replenish_rx(ha, rxr_idx); return 0; } static void qls_cq_isr(qla_host_t *ha, uint32_t cq_idx) { q81_cq_e_t *cq_e, *cq_b; uint32_t i, cq_comp_idx; int ret = 0, tx_comp_done = 0; - struct lro_ctrl *lro; +#if defined(INET) || defined(INET6) + struct lro_ctrl *lro = &ha->rx_ring[cq_idx].lro; +#endif cq_b = ha->rx_ring[cq_idx].cq_base_vaddr; - lro = &ha->rx_ring[cq_idx].lro; cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr); i = ha->rx_ring[cq_idx].cq_next; while (i != cq_comp_idx) { cq_e = &cq_b[i]; switch (cq_e->opcode) { case Q81_IOCB_TX_MAC: case Q81_IOCB_TX_TSO: qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e); tx_comp_done++; break; case Q81_IOCB_RX: ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e); break; case Q81_IOCB_MPI: case Q81_IOCB_SYS: default: device_printf(ha->pci_dev, "%s[%d %d 0x%x]: illegal \n", __func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)), cq_e->opcode); qls_dump_buf32(ha, __func__, cq_e, (sizeof (q81_cq_e_t) >> 2)); break; } i++; if (i == NUM_CQ_ENTRIES) i = 0; if (ret) { break; } if (i == cq_comp_idx) { cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr); } if (tx_comp_done) { taskqueue_enqueue(ha->tx_tq, &ha->tx_task); tx_comp_done = 0; } } +#if defined(INET) || defined(INET6) tcp_lro_flush_all(lro); +#endif ha->rx_ring[cq_idx].cq_next = cq_comp_idx; if (!ret) { Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next)); } if (tx_comp_done) taskqueue_enqueue(ha->tx_tq, &ha->tx_task); return; } static void qls_mbx_isr(qla_host_t *ha) { uint32_t data; int i; device_t dev = ha->pci_dev; if (qls_mbx_rd_reg(ha, 0, &data) == 0) { if ((data & 0xF000) == 0x4000) { ha->mbox[0] = data; for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) { if (qls_mbx_rd_reg(ha, i, &data)) break; ha->mbox[i] = data; } ha->mbx_done = 1; } else if ((data & 0xF000) == 0x8000) { /* we have an AEN */ ha->aen[0] = data; for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) { if (qls_mbx_rd_reg(ha, i, &data)) break; ha->aen[i] = data; } device_printf(dev,"%s: AEN " "[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" " 0x%08x 0x%08x 0x%08x 0x%08x]\n", __func__, ha->aen[0], ha->aen[1], ha->aen[2], ha->aen[3], ha->aen[4], ha->aen[5], ha->aen[6], ha->aen[7], ha->aen[8]); switch ((ha->aen[0] & 0xFFFF)) { case 0x8011: ha->link_up = 1; break; case 0x8012: ha->link_up = 0; break; case 0x8130: ha->link_hw_info = ha->aen[1]; break; case 0x8131: ha->link_hw_info = 0; break; } } } WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR); return; } void qls_isr(void *arg) { qla_ivec_t *ivec = arg; qla_host_t *ha; uint32_t status; uint32_t cq_idx; device_t dev; ha = ivec->ha; cq_idx = ivec->cq_idx; dev = ha->pci_dev; status = READ_REG32(ha, Q81_CTL_STATUS); if (status & Q81_CTL_STATUS_FE) { device_printf(dev, "%s fatal error\n", __func__); return; } if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) { qls_mbx_isr(ha); } status = READ_REG32(ha, Q81_CTL_INTR_STATUS1); if (status & ( 0x1 << cq_idx)) qls_cq_isr(ha, cq_idx); Q81_ENABLE_INTR(ha, cq_idx); return; } diff --git a/sys/dev/qlxge/qls_os.c b/sys/dev/qlxge/qls_os.c index 66216eeb7aed..7749cd10150c 100644 --- a/sys/dev/qlxge/qls_os.c +++ b/sys/dev/qlxge/qls_os.c @@ -1,1495 +1,1503 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * 
Copyright (c) 2013-2014 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: qls_os.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #include __FBSDID("$FreeBSD$"); #include "qls_os.h" #include "qls_hw.h" #include "qls_def.h" #include "qls_inline.h" #include "qls_ver.h" #include "qls_glbl.h" #include "qls_dbg.h" #include /* * Some PCI Configuration Space Related Defines */ #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_DEVICE_QLOGIC_8000 #define PCI_DEVICE_QLOGIC_8000 0x8000 #endif #define PCI_QLOGIC_DEV8000 \ ((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC) /* * static functions */ static int qls_alloc_parent_dma_tag(qla_host_t *ha); static void qls_free_parent_dma_tag(qla_host_t *ha); static void qls_flush_xmt_bufs(qla_host_t *ha); static int qls_alloc_rcv_bufs(qla_host_t *ha); static void qls_free_rcv_bufs(qla_host_t *ha); static void qls_init_ifnet(device_t dev, qla_host_t *ha); static void qls_release(qla_host_t *ha); static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void qls_stop(qla_host_t *ha); static int qls_send(qla_host_t *ha, struct mbuf **m_headp); static void qls_tx_done(void *context, int pending); static int qls_config_lro(qla_host_t *ha); static void qls_free_lro(qla_host_t *ha); static void qls_error_recovery(void *context, int pending); /* * Hooks to the Operating Systems */ static int qls_pci_probe (device_t); static int qls_pci_attach (device_t); static int qls_pci_detach (device_t); static void qls_start(struct ifnet *ifp); static void qls_init(void *arg); static int qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int qls_media_change(struct ifnet *ifp); static void qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); static device_method_t qla_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qls_pci_probe), DEVMETHOD(device_attach, qls_pci_attach), DEVMETHOD(device_detach, qls_pci_detach), { 0, 0 } }; static driver_t qla_pci_driver = { "ql", qla_pci_methods, sizeof (qla_host_t), }; static devclass_t qla8000_devclass; DRIVER_MODULE(qla8000, pci, qla_pci_driver, qla8000_devclass, 0, 0); MODULE_DEPEND(qla8000, pci, 1, 1, 1); MODULE_DEPEND(qla8000, ether, 1, 1, 1); 
MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver"); static char dev_str[64]; static char ver_str[64]; /* * Name: qls_pci_probe * Function: Validate the PCI device to be a QLA80XX device */ static int qls_pci_probe(device_t dev) { switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_DEV8000: snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", "Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); device_set_desc(dev, dev_str); break; default: return (ENXIO); } if (bootverbose) printf("%s: %s\n ", __func__, dev_str); return (BUS_PROBE_DEFAULT); } static int qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS) { int err = 0, ret; qla_host_t *ha; uint32_t i; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; for (i = 0; i < ha->num_tx_rings; i++) { device_printf(ha->pci_dev, "%s: tx_ring[%d].tx_frames= %p\n", __func__, i, (void *)ha->tx_ring[i].tx_frames); device_printf(ha->pci_dev, "%s: tx_ring[%d].tx_tso_frames= %p\n", __func__, i, (void *)ha->tx_ring[i].tx_tso_frames); device_printf(ha->pci_dev, "%s: tx_ring[%d].tx_vlan_frames= %p\n", __func__, i, (void *)ha->tx_ring[i].tx_vlan_frames); device_printf(ha->pci_dev, "%s: tx_ring[%d].txr_free= 0x%08x\n", __func__, i, ha->tx_ring[i].txr_free); device_printf(ha->pci_dev, "%s: tx_ring[%d].txr_next= 0x%08x\n", __func__, i, ha->tx_ring[i].txr_next); device_printf(ha->pci_dev, "%s: tx_ring[%d].txr_done= 0x%08x\n", __func__, i, ha->tx_ring[i].txr_done); device_printf(ha->pci_dev, "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n", __func__, i, *(ha->tx_ring[i].txr_cons_vaddr)); } for (i = 0; i < ha->num_rx_rings; i++) { device_printf(ha->pci_dev, "%s: rx_ring[%d].rx_int= %p\n", __func__, i, (void *)ha->rx_ring[i].rx_int); device_printf(ha->pci_dev, "%s: rx_ring[%d].rss_int= %p\n", __func__, i, (void *)ha->rx_ring[i].rss_int); device_printf(ha->pci_dev, "%s: rx_ring[%d].lbq_next= 0x%08x\n", __func__, i, ha->rx_ring[i].lbq_next); device_printf(ha->pci_dev, "%s: rx_ring[%d].lbq_free= 0x%08x\n", __func__, i, ha->rx_ring[i].lbq_free); device_printf(ha->pci_dev, "%s: rx_ring[%d].lbq_in= 0x%08x\n", __func__, i, ha->rx_ring[i].lbq_in); device_printf(ha->pci_dev, "%s: rx_ring[%d].sbq_next= 0x%08x\n", __func__, i, ha->rx_ring[i].sbq_next); device_printf(ha->pci_dev, "%s: rx_ring[%d].sbq_free= 0x%08x\n", __func__, i, ha->rx_ring[i].sbq_free); device_printf(ha->pci_dev, "%s: rx_ring[%d].sbq_in= 0x%08x\n", __func__, i, ha->rx_ring[i].sbq_in); } device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n", __func__, ha->err_m_getcl); device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n", __func__, ha->err_m_getjcl); device_printf(ha->pci_dev, "%s: err_tx_dmamap_create = 0x%08x\n", __func__, ha->err_tx_dmamap_create); device_printf(ha->pci_dev, "%s: err_tx_dmamap_load = 0x%08x\n", __func__, ha->err_tx_dmamap_load); device_printf(ha->pci_dev, "%s: err_tx_defrag = 0x%08x\n", __func__, ha->err_tx_defrag); } return (err); } static void qls_add_sysctls(qla_host_t *ha) { device_t dev = ha->pci_dev; SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "version", CTLFLAG_RD, ver_str, 0, "Driver Version"); qls_dbg_level = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &qls_dbg_level, 
qls_dbg_level, "Debug Level"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics"); return; } static void qls_watchdog(void *arg) { qla_host_t *ha = arg; struct ifnet *ifp; ifp = ha->ifp; if (ha->flags.qla_watchdog_exit) { ha->qla_watchdog_exited = 1; return; } ha->qla_watchdog_exited = 0; if (!ha->flags.qla_watchdog_pause) { if (ha->qla_initiate_recovery) { ha->qla_watchdog_paused = 1; ha->qla_initiate_recovery = 0; ha->err_inject = 0; taskqueue_enqueue(ha->err_tq, &ha->err_task); } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) { taskqueue_enqueue(ha->tx_tq, &ha->tx_task); } ha->qla_watchdog_paused = 0; } else { ha->qla_watchdog_paused = 1; } ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000; callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qls_watchdog, ha); return; } /* * Name: qls_pci_attach * Function: attaches the device to the operating system */ static int qls_pci_attach(device_t dev) { qla_host_t *ha = NULL; int i; QL_DPRINT2((dev, "%s: enter\n", __func__)); if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(ha, 0, sizeof (qla_host_t)); if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) { device_printf(dev, "device is not QLE8000\n"); return (ENXIO); } ha->pci_func = pci_get_function(dev); ha->pci_dev = dev; pci_enable_busmaster(dev); ha->reg_rid = PCIR_BAR(1); ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, RF_ACTIVE); if (ha->pci_reg == NULL) { device_printf(dev, "unable to map any ports\n"); goto qls_pci_attach_err; } ha->reg_rid1 = PCIR_BAR(3); ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid1, RF_ACTIVE); if (ha->pci_reg1 == NULL) { device_printf(dev, "unable to map any ports\n"); goto qls_pci_attach_err; } mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); qls_add_sysctls(ha); qls_hw_add_sysctls(ha); ha->flags.lock_init = 1; ha->msix_count = pci_msix_count(dev); if (ha->msix_count < qls_get_msix_count(ha)) { device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, ha->msix_count); goto qls_pci_attach_err; } ha->msix_count = qls_get_msix_count(ha); device_printf(dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x" " pci_reg %p pci_reg1 %p\n", __func__, ha, ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1); if (pci_alloc_msix(dev, &ha->msix_count)) { device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, ha->msix_count); ha->msix_count = 0; goto qls_pci_attach_err; } for (i = 0; i < ha->num_rx_rings; i++) { ha->irq_vec[i].cq_idx = i; ha->irq_vec[i].ha = ha; ha->irq_vec[i].irq_rid = 1 + i; ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->irq_vec[i].irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->irq_vec[i].irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto qls_pci_attach_err; } if (bus_setup_intr(dev, ha->irq_vec[i].irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr, &ha->irq_vec[i], &ha->irq_vec[i].handle)) { device_printf(dev, "could not setup interrupt\n"); goto qls_pci_attach_err; } } qls_rd_nic_params(ha); /* allocate parent dma tag */ if (qls_alloc_parent_dma_tag(ha)) { device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n", __func__); goto qls_pci_attach_err; } /* alloc all dma buffers */ if 
(qls_alloc_dma(ha)) { device_printf(dev, "%s: qls_alloc_dma failed\n", __func__); goto qls_pci_attach_err; } /* create the o.s ethernet interface */ qls_init_ifnet(dev, ha); ha->flags.qla_watchdog_active = 1; ha->flags.qla_watchdog_pause = 1; TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha); ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT, taskqueue_thread_enqueue, &ha->tx_tq); taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq", device_get_nameunit(ha->pci_dev)); callout_init(&ha->tx_callout, 1); ha->flags.qla_callout_init = 1; /* create ioctl device interface */ if (qls_make_cdev(ha)) { device_printf(dev, "%s: qls_make_cdev failed\n", __func__); goto qls_pci_attach_err; } callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qls_watchdog, ha); TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha); ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT, taskqueue_thread_enqueue, &ha->err_tq); taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", device_get_nameunit(ha->pci_dev)); QL_DPRINT2((dev, "%s: exit 0\n", __func__)); return (0); qls_pci_attach_err: qls_release(ha); QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__)); return (ENXIO); } /* * Name: qls_pci_detach * Function: Unhooks the device from the operating system */ static int qls_pci_detach(device_t dev) { qla_host_t *ha = NULL; QL_DPRINT2((dev, "%s: enter\n", __func__)); if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } (void)QLA_LOCK(ha, __func__, 0); qls_stop(ha); QLA_UNLOCK(ha, __func__); qls_release(ha); QL_DPRINT2((dev, "%s: exit\n", __func__)); return (0); } /* * Name: qls_release * Function: Releases the resources allocated for the device */ static void qls_release(qla_host_t *ha) { device_t dev; int i; dev = ha->pci_dev; if (ha->err_tq) { taskqueue_drain(ha->err_tq, &ha->err_task); taskqueue_free(ha->err_tq); } if (ha->tx_tq) { taskqueue_drain(ha->tx_tq, &ha->tx_task); taskqueue_free(ha->tx_tq); } qls_del_cdev(ha); if (ha->flags.qla_watchdog_active) { ha->flags.qla_watchdog_exit = 1; while (ha->qla_watchdog_exited == 0) qls_mdelay(__func__, 1); } if (ha->flags.qla_callout_init) callout_stop(&ha->tx_callout); if (ha->ifp != NULL) ether_ifdetach(ha->ifp); qls_free_dma(ha); qls_free_parent_dma_tag(ha); for (i = 0; i < ha->num_rx_rings; i++) { if (ha->irq_vec[i].handle) { (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, ha->irq_vec[i].handle); } if (ha->irq_vec[i].irq) { (void)bus_release_resource(dev, SYS_RES_IRQ, ha->irq_vec[i].irq_rid, ha->irq_vec[i].irq); } } if (ha->msix_count) pci_release_msi(dev); if (ha->flags.lock_init) { mtx_destroy(&ha->tx_lock); mtx_destroy(&ha->hw_lock); } if (ha->pci_reg) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, ha->pci_reg); if (ha->pci_reg1) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, ha->pci_reg1); } /* * DMA Related Functions */ static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { *((bus_addr_t *)arg) = 0; if (error) { printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); return; } *((bus_addr_t *)arg) = segs[0].ds_addr; return; } int qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { int ret = 0; device_t dev; bus_addr_t b_addr; dev = ha->pci_dev; QL_DPRINT2((dev, "%s: enter\n", __func__)); ret = bus_dma_tag_create( ha->parent_tag,/* parent */ dma_buf->alignment, ((bus_size_t)(1ULL << 32)),/* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_buf->size, /* maxsize */ 
1, /* nsegments */ dma_buf->size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma_buf->dma_tag); if (ret) { device_printf(dev, "%s: could not create dma tag\n", __func__); goto qls_alloc_dmabuf_exit; } ret = bus_dmamem_alloc(dma_buf->dma_tag, (void **)&dma_buf->dma_b, (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), &dma_buf->dma_map); if (ret) { bus_dma_tag_destroy(dma_buf->dma_tag); device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); goto qls_alloc_dmabuf_exit; } ret = bus_dmamap_load(dma_buf->dma_tag, dma_buf->dma_map, dma_buf->dma_b, dma_buf->size, qls_dmamap_callback, &b_addr, BUS_DMA_NOWAIT); if (ret || !b_addr) { bus_dma_tag_destroy(dma_buf->dma_tag); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); ret = -1; goto qls_alloc_dmabuf_exit; } dma_buf->dma_addr = b_addr; qls_alloc_dmabuf_exit: QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", __func__, ret, (void *)dma_buf->dma_tag, (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, dma_buf->size)); return ret; } void qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); bus_dma_tag_destroy(dma_buf->dma_tag); } static int qls_alloc_parent_dma_tag(qla_host_t *ha) { int ret; device_t dev; dev = ha->pci_dev; /* * Allocate parent DMA Tag */ ret = bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ha->parent_tag); if (ret) { device_printf(dev, "%s: could not create parent dma tag\n", __func__); return (-1); } ha->flags.parent_tag = 1; return (0); } static void qls_free_parent_dma_tag(qla_host_t *ha) { if (ha->flags.parent_tag) { bus_dma_tag_destroy(ha->parent_tag); ha->flags.parent_tag = 0; } } /* * Name: qls_init_ifnet * Function: Creates the Network Device Interface and Registers it with the O.S */ static void qls_init_ifnet(device_t dev, qla_host_t *ha) { struct ifnet *ifp; QL_DPRINT2((dev, "%s: enter\n", __func__)); ifp = ha->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); ifp->if_init = qls_init; ifp->if_softc = ha; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = qls_ioctl; ifp->if_start = qls_start; IFQ_SET_MAXLEN(&ifp->if_snd, qls_get_ifq_snd_maxlen(ha)); ifp->if_snd.ifq_drv_maxlen = qls_get_ifq_snd_maxlen(ha); IFQ_SET_READY(&ifp->if_snd); ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; if (ha->max_frame_size <= MCLBYTES) { ha->msize = MCLBYTES; } else if (ha->max_frame_size <= MJUMPAGESIZE) { ha->msize = MJUMPAGESIZE; } else ha->msize = MJUM9BYTES; ether_ifattach(ifp, qls_get_mac_addr(ha)); ifp->if_capabilities = IFCAP_JUMBO_MTU; ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_TSO4; ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; ifp->if_capabilities |= IFCAP_VLAN_HWTSO; ifp->if_capabilities |= IFCAP_LINKSTATE; ifp->if_capenable = ifp->if_capabilities; ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status); 
ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); QL_DPRINT2((dev, "%s: exit\n", __func__)); return; } static void qls_init_locked(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; qls_stop(ha); qls_flush_xmt_bufs(ha); if (qls_alloc_rcv_bufs(ha) != 0) return; if (qls_config_lro(ha)) return; bcopy(IF_LLADDR(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN); ifp->if_hwassist = CSUM_IP; ifp->if_hwassist |= CSUM_TCP; ifp->if_hwassist |= CSUM_UDP; ifp->if_hwassist |= CSUM_TSO; if (qls_init_hw_if(ha) == 0) { ifp = ha->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ha->flags.qla_watchdog_pause = 0; } return; } static void qls_init(void *arg) { qla_host_t *ha; ha = (qla_host_t *)arg; QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); (void)QLA_LOCK(ha, __func__, 0); qls_init_locked(ha); QLA_UNLOCK(ha, __func__); QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); } static u_int qls_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) { uint8_t *mta = arg; if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) return (0); bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); return (1); } static void qls_set_multi(qla_host_t *ha, uint32_t add_multi) { uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; struct ifnet *ifp = ha->ifp; int mcnt; mcnt = if_foreach_llmaddr(ifp, qls_copy_maddr, mta); if (QLA_LOCK(ha, __func__, 1) == 0) { qls_hw_set_multi(ha, mta, mcnt, add_multi); QLA_UNLOCK(ha, __func__); } return; } static int qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { int ret = 0; struct ifreq *ifr = (struct ifreq *)data; +#ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; +#endif qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; switch (cmd) { case SIOCSIFADDR: QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", __func__, cmd)); +#ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { (void)QLA_LOCK(ha, __func__, 0); qls_init_locked(ha); QLA_UNLOCK(ha, __func__); } QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr))); arp_ifinit(ifp, ifa); - } else { - ether_ioctl(ifp, cmd, data); + break; } +#endif + ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", __func__, cmd)); if (ifr->ifr_mtu > QLA_MAX_MTU) { ret = EINVAL; } else { (void) QLA_LOCK(ha, __func__, 0); ifp->if_mtu = ifr->ifr_mtu; ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; QLA_UNLOCK(ha, __func__); if (ret) ret = EINVAL; } break; case SIOCSIFFLAGS: QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", __func__, cmd)); (void)QLA_LOCK(ha, __func__, 0); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ ha->if_flags) & IFF_PROMISC) { ret = qls_set_promisc(ha); } else if ((ifp->if_flags ^ ha->if_flags) & IFF_ALLMULTI) { ret = qls_set_allmulti(ha); } } else { ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; qls_init_locked(ha); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) qls_stop(ha); ha->if_flags = ifp->if_flags; } QLA_UNLOCK(ha, __func__); break; case SIOCADDMULTI: QL_DPRINT4((ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { qls_set_multi(ha, 1); } break; case SIOCDELMULTI: QL_DPRINT4((ha->pci_dev, "%s: %s (0x%lx)\n", 
__func__, "SIOCDELMULTI", cmd)); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { qls_set_multi(ha, 0); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", __func__, cmd)); ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", __func__, cmd)); if (mask & IFCAP_HWCSUM) ifp->if_capenable ^= IFCAP_HWCSUM; if (mask & IFCAP_TSO4) ifp->if_capenable ^= IFCAP_TSO4; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) qls_init(ha); VLAN_CAPABILITIES(ifp); break; } default: QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n", __func__, cmd)); ret = ether_ioctl(ifp, cmd, data); break; } return (ret); } static int qls_media_change(struct ifnet *ifp) { qla_host_t *ha; struct ifmedia *ifm; int ret = 0; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); ifm = &ha->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) ret = EINVAL; QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static void qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; qls_update_link_state(ha); if (ha->link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha)); } QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\ (ha->link_up ? "link_up" : "link_down"))); return; } static void qls_start(struct ifnet *ifp) { int i, ret = 0; struct mbuf *m_head; qla_host_t *ha = (qla_host_t *)ifp->if_softc; QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); if (!mtx_trylock(&ha->tx_lock)) { QL_DPRINT8((ha->pci_dev, "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__)); return; } if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == IFF_DRV_RUNNING) { for (i = 0; i < ha->num_tx_rings; i++) { ret |= qls_hw_tx_done(ha, i); } if (ret == 0) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); QLA_TX_UNLOCK(ha); return; } if (!ha->link_up) { qls_update_link_state(ha); if (!ha->link_up) { QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__)); QLA_TX_UNLOCK(ha); return; } } while (ifp->if_snd.ifq_head != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n", __func__)); break; } if (qls_send(ha, &m_head)) { if (m_head == NULL) break; QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__)); ifp->if_drv_flags |= IFF_DRV_OACTIVE; IF_PREPEND(&ifp->if_snd, m_head); break; } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); } QLA_TX_UNLOCK(ha); QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); return; } static int qls_send(qla_host_t *ha, struct mbuf **m_headp) { bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; bus_dmamap_t map; int nsegs; int ret = -1; uint32_t tx_idx; struct mbuf *m_head = *m_headp; uint32_t txr_idx = 0; QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); /* check if flowid is set */ if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1); tx_idx = ha->tx_ring[txr_idx].txr_next; map = 
ha->tx_ring[txr_idx].tx_buf[tx_idx].map; ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (ret == EFBIG) { struct mbuf *m; QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__, m_head->m_pkthdr.len)); m = m_defrag(m_head, M_NOWAIT); if (m == NULL) { ha->err_tx_defrag++; m_freem(m_head); *m_headp = NULL; device_printf(ha->pci_dev, "%s: m_defrag() = NULL [%d]\n", __func__, ret); return (ENOBUFS); } m_head = m; *m_headp = m_head; if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT))) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } } else if (ret) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet")); bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) { ha->tx_ring[txr_idx].count++; ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map; } else { if (ret == EINVAL) { if (m_head) m_freem(m_head); *m_headp = NULL; } } QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static void qls_stop(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); ha->flags.qla_watchdog_pause = 1; while (!ha->qla_watchdog_paused) qls_mdelay(__func__, 1); qls_del_hw_if(ha); qls_free_lro(ha); qls_flush_xmt_bufs(ha); qls_free_rcv_bufs(ha); return; } /* * Buffer Management Functions for Transmit and Receive Rings */ /* * Release mbuf after it sent on the wire */ static void qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) { QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); if (txb->m_head) { bus_dmamap_unload(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; } QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); } static void qls_flush_xmt_bufs(qla_host_t *ha) { int i, j; for (j = 0; j < ha->num_tx_rings; j++) { for (i = 0; i < NUM_TX_DESCRIPTORS; i++) qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); } return; } static int qls_alloc_rcv_mbufs(qla_host_t *ha, int r) { int i, j, ret = 0; qla_rx_buf_t *rxb; qla_rx_ring_t *rx_ring; volatile q81_bq_addr_e_t *sbq_e; rx_ring = &ha->rx_ring[r]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &rx_ring->rx_buf[i]; ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); if (ret) { device_printf(ha->pci_dev, "%s: dmamap[%d, %d] failed\n", __func__, r, i); for (j = 0; j < i; j++) { rxb = &rx_ring->rx_buf[j]; bus_dmamap_destroy(ha->rx_tag, rxb->map); } goto qls_alloc_rcv_mbufs_err; } } rx_ring = &ha->rx_ring[r]; sbq_e = rx_ring->sbq_vaddr; rxb = &rx_ring->rx_buf[0]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { if (!(ret = qls_get_mbuf(ha, rxb, NULL))) { /* * set the physical address in the * corresponding descriptor entry in the * receive ring/queue for the hba */ sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF; sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF; } else { device_printf(ha->pci_dev, "%s: qls_get_mbuf [%d, %d] failed\n", __func__, r, i); bus_dmamap_destroy(ha->rx_tag, rxb->map); goto qls_alloc_rcv_mbufs_err; } rxb++; sbq_e++; } return 0; qls_alloc_rcv_mbufs_err: return (-1); } static void qls_free_rcv_bufs(qla_host_t 
*ha) { int i, r; qla_rx_buf_t *rxb; qla_rx_ring_t *rxr; for (r = 0; r < ha->num_rx_rings; r++) { rxr = &ha->rx_ring[r]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &rxr->rx_buf[i]; if (rxb->m_head != NULL) { bus_dmamap_unload(ha->rx_tag, rxb->map); bus_dmamap_destroy(ha->rx_tag, rxb->map); m_freem(rxb->m_head); } } bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); } return; } static int qls_alloc_rcv_bufs(qla_host_t *ha) { int r, ret = 0; qla_rx_ring_t *rxr; for (r = 0; r < ha->num_rx_rings; r++) { rxr = &ha->rx_ring[r]; bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); } for (r = 0; r < ha->num_rx_rings; r++) { ret = qls_alloc_rcv_mbufs(ha, r); if (ret) qls_free_rcv_bufs(ha); } return (ret); } int qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) { struct mbuf *mp = nmp; int ret = 0; uint32_t offset; bus_dma_segment_t segs[1]; int nsegs; QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); if (mp == NULL) { mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize); if (mp == NULL) { if (ha->msize == MCLBYTES) ha->err_m_getcl++; else ha->err_m_getjcl++; ret = ENOBUFS; device_printf(ha->pci_dev, "%s: m_getcl failed\n", __func__); goto exit_qls_get_mbuf; } mp->m_len = mp->m_pkthdr.len = ha->msize; } else { mp->m_len = mp->m_pkthdr.len = ha->msize; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } /* align the receive buffers to 8 byte boundary */ offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); if (offset) { offset = 8 - offset; m_adj(mp, offset); } /* * Using memory from the mbuf cluster pool, invoke the bus_dma * machinery to arrange the memory mapping. */ ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, mp, segs, &nsegs, BUS_DMA_NOWAIT); rxb->paddr = segs[0].ds_addr; if (ret || !rxb->paddr || (nsegs != 1)) { m_freem(mp); rxb->m_head = NULL; device_printf(ha->pci_dev, "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", __func__, ret, (long long unsigned int)rxb->paddr, nsegs); ret = -1; goto exit_qls_get_mbuf; } rxb->m_head = mp; bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); exit_qls_get_mbuf: QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); return (ret); } static void qls_tx_done(void *context, int pending) { qla_host_t *ha = context; struct ifnet *ifp; ifp = ha->ifp; if (!ifp) return; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); return; } qls_start(ha->ifp); return; } static int qls_config_lro(qla_host_t *ha) { +#if defined(INET) || defined(INET6) int i; struct lro_ctrl *lro; for (i = 0; i < ha->num_rx_rings; i++) { lro = &ha->rx_ring[i].lro; if (tcp_lro_init(lro)) { device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n", __func__); return (-1); } lro->ifp = ha->ifp; } ha->flags.lro_init = 1; QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__)); +#endif return (0); } static void qls_free_lro(qla_host_t *ha) { +#if defined(INET) || defined(INET6) int i; struct lro_ctrl *lro; if (!ha->flags.lro_init) return; for (i = 0; i < ha->num_rx_rings; i++) { lro = &ha->rx_ring[i].lro; tcp_lro_free(lro); } ha->flags.lro_init = 0; +#endif } static void qls_error_recovery(void *context, int pending) { qla_host_t *ha = context; qls_init(ha); return; } diff --git a/sys/dev/qlxge/qls_os.h b/sys/dev/qlxge/qls_os.h index ac97575c8ffa..b10a37ecd5d3 100644 --- a/sys/dev/qlxge/qls_os.h +++ b/sys/dev/qlxge/qls_os.h @@ -1,159 +1,161 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2014 Qlogic Corporation * All rights 
reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * File: qls_os.h * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #ifndef _QLS_OS_H_ #define _QLS_OS_H_ +#include "opt_inet.h" + #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 900044 #error FreeBSD Version not supported - use version >= 900044 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define QLA_USEC_DELAY(usec) DELAY(usec) static __inline int qls_ms_to_hz(int ms) { int qla_hz; struct timeval t; t.tv_sec = ms / 1000; t.tv_usec = (ms % 1000) * 1000; qla_hz = tvtohz(&t); if (qla_hz < 0) qla_hz = 0x7fffffff; if (!qla_hz) qla_hz = 1; return (qla_hz); } static __inline int qls_sec_to_hz(int sec) { struct timeval t; t.tv_sec = sec; t.tv_usec = 0; return (tvtohz(&t)); } #define qla_host_to_le16(x) htole16(x) #define qla_host_to_le32(x) htole32(x) #define qla_host_to_le64(x) htole64(x) #define qla_host_to_be16(x) htobe16(x) #define qla_host_to_be32(x) htobe32(x) #define qla_host_to_be64(x) htobe64(x) #define qla_le16_to_host(x) le16toh(x) #define qla_le32_to_host(x) le32toh(x) #define qla_le64_to_host(x) le64toh(x) #define qla_be16_to_host(x) be16toh(x) #define qla_be32_to_host(x) be32toh(x) #define qla_be64_to_host(x) be64toh(x) MALLOC_DECLARE(M_QLA8XXXBUF); #define qls_mdelay(fn, msecs) \ {\ if (cold) \ DELAY((msecs * 1000)); \ else \ pause(fn, qls_ms_to_hz(msecs)); \ } /* * Locks */ #define QLA_LOCK(ha, str, no_delay) qls_lock(ha, str, no_delay) #define QLA_UNLOCK(ha, str) qls_unlock(ha, str) #define QLA_TX_LOCK(ha) mtx_lock(&ha->tx_lock); #define QLA_TX_UNLOCK(ha) mtx_unlock(&ha->tx_lock); #endif /* #ifndef _QLS_OS_H_ */
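Editorial note, not part of the patch above: the change is essentially one pattern applied throughout the qlxge/qlxgb drivers, so a minimal standalone sketch of that pattern may help when reading the hunks. LRO state (struct lro_ctrl) and the tcp_lro_*() calls only exist when the kernel is built with INET and/or INET6, so every reference is fenced with the option macros, and the option headers must be included so those macros are visible. The structure and function names below (sketch_rx_ring, sketch_config_lro, sketch_intr_done) are hypothetical stand-ins, not the driver's real qla_rx_ring_t or qls_config_lro(); only the tcp_lro_*() calls and the opt_inet*.h / INET / INET6 macros are taken from the patch and the stock FreeBSD headers.

/*
 * Minimal sketch of the INET/INET6 guarding pattern used by this change.
 * Assumes a FreeBSD kernel build environment; the sketch_* names are
 * placeholders for illustration only.
 */
#include "opt_inet.h"		/* defines INET when the option is set */
#include "opt_inet6.h"		/* defines INET6 when the option is set */

#include <sys/param.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/tcp_lro.h>
#endif

struct sketch_rx_ring {
#if defined(INET) || defined(INET6)
	struct lro_ctrl	lro;	/* only present on INET/INET6 kernels */
#endif
	int		dummy;	/* keep the struct non-empty either way */
};

/*
 * Per-ring LRO setup; compiles to a no-op when neither INET nor INET6
 * is configured, mirroring what qls_config_lro() does in the patch.
 */
static int
sketch_config_lro(struct sketch_rx_ring *ring, struct ifnet *ifp)
{
#if defined(INET) || defined(INET6)
	if (tcp_lro_init(&ring->lro) != 0)
		return (-1);
	ring->lro.ifp = ifp;
#else
	(void)ring;
	(void)ifp;
#endif
	return (0);
}

/*
 * Flush any queued LRO work at the end of a completion-queue pass,
 * as the guarded tcp_lro_flush_all() call in qls_cq_isr() does.
 */
static void
sketch_intr_done(struct sketch_rx_ring *ring)
{
#if defined(INET) || defined(INET6)
	tcp_lro_flush_all(&ring->lro);
#else
	(void)ring;
#endif
}

The same idea covers the ioctl path: code that is meaningful only for IPv4 (the SIOCSIFADDR arp_ifinit() case) sits under a plain #ifdef INET, with the generic ether_ioctl() fallback left outside the guard, so kernels built without INET still compile and behave sensibly.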