Index: head/sys/dev/liquidio/base/cn23xx_pf_device.c =================================================================== --- head/sys/dev/liquidio/base/cn23xx_pf_device.c (revision 324993) +++ head/sys/dev/liquidio/base/cn23xx_pf_device.c (revision 324994) @@ -1,1008 +1,1008 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "cn23xx_pf_device.h" #include "lio_main.h" #include "lio_rss.h" static int lio_cn23xx_pf_soft_reset(struct octeon_device *oct) { lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF); lio_dev_dbg(oct, "BIST enabled for CN23XX soft reset\n"); lio_write_csr64(oct, LIO_CN23XX_SLI_SCRATCH1, 0x1234ULL); /* Initiate chip-wide soft reset */ lio_pci_readq(oct, LIO_CN23XX_RST_SOFT_RST); lio_pci_writeq(oct, 1, LIO_CN23XX_RST_SOFT_RST); /* Wait for 100ms as Octeon resets. 
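 * The 0x1234ULL written to SLI_SCRATCH1 above is expected to be cleared by
 * the reset, so a non-zero read-back below means the soft reset failed.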
 */
	lio_mdelay(100);

	if (lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH1)) {
		lio_dev_err(oct, "Soft reset failed\n");
		return (1);
	}

	lio_dev_dbg(oct, "Reset completed\n");

	/* restore the reset value */
	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	return (0);
}

static void
lio_cn23xx_pf_enable_error_reporting(struct octeon_device *oct)
{
	uint32_t	corrtable_err_status, uncorrectable_err_mask, regval;

	regval = lio_read_pci_cfg(oct, LIO_CN23XX_CFG_PCIE_DEVCTL);
	if (regval & LIO_CN23XX_CFG_PCIE_DEVCTL_MASK) {
		uncorrectable_err_mask = 0;
		corrtable_err_status = 0;
		uncorrectable_err_mask =
		    lio_read_pci_cfg(oct,
				     LIO_CN23XX_CFG_PCIE_UNCORRECT_ERR_MASK);
		corrtable_err_status =
		    lio_read_pci_cfg(oct,
				     LIO_CN23XX_CFG_PCIE_CORRECT_ERR_STATUS);
		lio_dev_err(oct, "PCI-E Fatal error detected;\n"
			    "\tdev_ctl_status_reg = 0x%08x\n"
			    "\tuncorrectable_error_mask_reg = 0x%08x\n"
			    "\tcorrectable_error_status_reg = 0x%08x\n",
			    regval, uncorrectable_err_mask,
			    corrtable_err_status);
	}

	regval |= 0xf;	/* Enable Link error reporting */

	lio_dev_dbg(oct, "Enabling PCI-E error reporting..\n");
	lio_write_pci_cfg(oct, LIO_CN23XX_CFG_PCIE_DEVCTL, regval);
}

static uint32_t
lio_cn23xx_pf_coprocessor_clock(struct octeon_device *oct)
{
	/*
	 * Bits 29:24 of RST_BOOT[PNR_MUL] holds the ref. clock MULTIPLIER
	 * for SLI.
	 */

	/* TBD: get the info in Hand-shake */
	return (((lio_pci_readq(oct, LIO_CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
}

uint32_t
lio_cn23xx_pf_get_oq_ticks(struct octeon_device *oct, uint32_t time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	uint32_t	oqticks_per_us = lio_cn23xx_pf_coprocessor_clock(oct);

	oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/*
	 * time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return (oqticks_per_us);
}

static void
lio_cn23xx_pf_setup_global_mac_regs(struct octeon_device *oct)
{
	uint64_t	reg_val;
	uint16_t	mac_no = oct->pcie_port;
	uint16_t	pf_num = oct->pf_num;

	/* program SRN and TRS for each MAC(0..3) */
	lio_dev_dbg(oct, "%s: Using pcie port %d\n", __func__, mac_no);

	/* By default, map all 64 IOQs to a single MAC */
	reg_val =
	    lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no,
							       pf_num));

	/* setting SRN <6:0> */
	reg_val = pf_num * LIO_CN23XX_PF_MAX_RINGS;

	/* setting TRS <23:16> */
	reg_val = reg_val |
	    (oct->sriov_info.trs << LIO_CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);

	/* write these settings to MAC register */
	lio_write_csr64(oct, LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
			reg_val);

-	lio_dev_dbg(oct, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016lx\n", mac_no,
+	lio_dev_dbg(oct, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n", mac_no,
 		    pf_num,
-		    lio_read_csr64(oct,
+		    LIO_CAST64(lio_read_csr64(oct,
 				   LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no,
-								  pf_num)));
+								  pf_num))));
}

static int
lio_cn23xx_pf_reset_io_queues(struct octeon_device *oct)
{
	uint64_t	d64;
	uint32_t	ern, loop = BUSY_READING_REG_PF_LOOP_COUNT;
	uint32_t	q_no, srn;
	int		ret_val = 0;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	/*
	 * As per the HRM register description, s/w can't write 0 to ENB;
	 * to turn a queue off, the RST bit must be set instead.
	 * Reset the Enable bit for all the 64 IQs.
	 */
	for (q_no = srn; q_no < ern; q_no++) {
		/* set RST bit to 1.
This bit applies to both IQ and OQ */ d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); d64 = d64 | LIO_CN23XX_PKT_INPUT_CTL_RST; lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64); } /* wait until the RST bit is clear or the RST and quiet bits are set */ for (q_no = srn; q_no < ern; q_no++) { volatile uint64_t reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); while ((reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) && !(reg_val & LIO_CN23XX_PKT_INPUT_CTL_QUIET) && loop) { reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); loop--; } if (!loop) { lio_dev_err(oct, "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n", q_no); return (-1); } reg_val &= ~LIO_CN23XX_PKT_INPUT_CTL_RST; lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) { lio_dev_err(oct, "clearing the reset failed for qno: %u\n", q_no); ret_val = -1; } } return (ret_val); } static int lio_cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) { struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip; struct lio_instr_queue *iq; uint64_t intr_threshold; uint64_t pf_num, reg_val; uint32_t q_no, ern, srn; pf_num = oct->pf_num; srn = oct->sriov_info.pf_srn; ern = srn + oct->sriov_info.num_pf_rings; if (lio_cn23xx_pf_reset_io_queues(oct)) return (-1); /* * Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg * for all queues.Only PF can set these bits. * bits 29:30 indicate the MAC num. * bits 32:47 indicate the PVF num. */ for (q_no = 0; q_no < ern; q_no++) { reg_val = oct->pcie_port << LIO_CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; reg_val |= pf_num << LIO_CN23XX_PKT_INPUT_CTL_PF_NUM_POS; lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); } /* * Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for * pf queues */ for (q_no = srn; q_no < ern; q_no++) { uint32_t inst_cnt_reg; iq = oct->instr_queue[q_no]; if (iq != NULL) inst_cnt_reg = iq->inst_cnt_reg; else inst_cnt_reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(q_no); reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); reg_val |= LIO_CN23XX_PKT_INPUT_CTL_MASK; lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); /* Set WMARK level for triggering PI_INT */ /* intr_threshold = LIO_CN23XX_DEF_IQ_INTR_THRESHOLD & */ intr_threshold = LIO_GET_IQ_INTR_PKT_CFG(cn23xx->conf) & LIO_CN23XX_PKT_IN_DONE_WMARK_MASK; lio_write_csr64(oct, inst_cnt_reg, (lio_read_csr64(oct, inst_cnt_reg) & ~(LIO_CN23XX_PKT_IN_DONE_WMARK_MASK << LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) | (intr_threshold << LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS)); } return (0); } static void lio_cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) { struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip; uint64_t time_threshold; uint32_t ern, q_no, reg_val, srn; srn = oct->sriov_info.pf_srn; ern = srn + oct->sriov_info.num_pf_rings; if (LIO_GET_IS_SLI_BP_ON_CFG(cn23xx->conf)) { lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_WMARK, 32); } else { /* Set Output queue watermark to 0 to disable backpressure */ lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_WMARK, 0); } for (q_no = srn; q_no < ern; q_no++) { reg_val = lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no)); /* set IPTR & DPTR */ reg_val |= LIO_CN23XX_PKT_OUTPUT_CTL_DPTR; /* reset BMODE */ reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_BMODE); /* * No Relaxed Ordering, No Snoop, 64-bit Byte swap for * Output Queue ScatterList reset 
ROR_P, NSR_P
		 */
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_NSR_P);

#if BYTE_ORDER == LITTLE_ENDIAN
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_ES_P);
#else	/* BYTE_ORDER != LITTLE_ENDIAN */
		reg_val |= (LIO_CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif	/* BYTE_ORDER == LITTLE_ENDIAN */

		/*
		 * No Relaxed Ordering, No Snoop, 64-bit Byte swap for
		 * Output Queue Data reset ROR, NSR
		 */
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (LIO_CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no),
				reg_val);

		/*
		 * These interrupts are enabled in the
		 * oct->fn_list.enable_interrupt() routine, which is called
		 * after IOQ init. Set up the interrupt packet and time
		 * thresholds for all the OQs here.
		 */
		time_threshold = lio_cn23xx_pf_get_oq_ticks(oct,
		    (uint32_t)LIO_GET_OQ_INTR_TIME_CFG(cn23xx->conf));

		lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
				(LIO_GET_OQ_INTR_PKT_CFG(cn23xx->conf) |
				 (time_threshold << 32)));
	}

	/* Set the watermark level for PKO backpressure */
	lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_WMARK, 0x40);

	/* Enable channel-level backpressure */
	if (oct->pf_num)
		lio_write_csr64(oct, LIO_CN23XX_SLI_OUT_BP_EN2_W1S,
				0xffffffffffffffffULL);
	else
		lio_write_csr64(oct, LIO_CN23XX_SLI_OUT_BP_EN_W1S,
				0xffffffffffffffffULL);
}

static int
lio_cn23xx_pf_setup_device_regs(struct octeon_device *oct)
{
	lio_cn23xx_pf_enable_error_reporting(oct);

	/* program the MAC(0..3)_RINFO before setting up input/output regs */
	lio_cn23xx_pf_setup_global_mac_regs(oct);

	if (lio_cn23xx_pf_setup_global_input_regs(oct))
		return (-1);

	lio_cn23xx_pf_setup_global_output_regs(oct);

	/*
	 * Default error timeout value should be 0x200000 to avoid host hang
	 * when reads invalid register
	 */
	lio_write_csr64(oct, LIO_CN23XX_SLI_WINDOW_CTL,
			LIO_CN23XX_SLI_WINDOW_CTL_DEFAULT);

	/* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
	lio_write_csr64(oct, LIO_CN23XX_SLI_PKT_IN_JABBER,
			LIO_CN23XX_MAX_INPUT_JABBER);
	return (0);
}

static void
lio_cn23xx_pf_setup_iq_regs(struct octeon_device *oct, uint32_t iq_no)
{
	struct lio_instr_queue	*iq = oct->instr_queue[iq_no];
	uint64_t	pkt_in_done;

	iq_no += oct->sriov_info.pf_srn;

	/* Write the start of the input queue's ring and its size */
	lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			iq->base_addr_dma);
	lio_write_csr32(oct, LIO_CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/*
	 * Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = LIO_CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	lio_dev_dbg(oct, "InstQ[%d]:dbell reg @ 0x%x instcnt_reg @ 0x%x\n",
		    iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/*
	 * Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = lio_read_csr64(oct, iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt */
		lio_write_csr64(oct, iq->inst_cnt_reg,
				(pkt_in_done | LIO_CN23XX_INTR_CINT_ENB));
	} else {
		/*
		 * Clear the count by writing back what we read, but don't
		 * enable interrupts
		 */
		lio_write_csr64(oct, iq->inst_cnt_reg, pkt_in_done);
	}

	iq->reset_instr_cnt = 0;
}

static void
lio_cn23xx_pf_setup_oq_regs(struct octeon_device *oct, uint32_t oq_no)
{
	struct lio_droq	*droq = oct->droq[oq_no];
	struct lio_cn23xx_pf	*cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t	cnt_threshold;
	uint64_t	time_threshold;
	uint32_t	reg_val;

	oq_no += oct->sriov_info.pf_srn;

	lio_write_csr64(oct,
LIO_CN23XX_SLI_OQ_BASE_ADDR64(oq_no), droq->desc_ring_dma); lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count); lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no), droq->buffer_size); /* pkt_sent and pkts_credit regs */ droq->pkts_sent_reg = LIO_CN23XX_SLI_OQ_PKTS_SENT(oq_no); droq->pkts_credit_reg = LIO_CN23XX_SLI_OQ_PKTS_CREDIT(oq_no); if (!oct->msix_on) { /* * Enable this output queue to generate Packet Timer * Interrupt */ reg_val = lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq_no)); reg_val |= LIO_CN23XX_PKT_OUTPUT_CTL_TENB; lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq_no), reg_val); /* * Enable this output queue to generate Packet Count * Interrupt */ reg_val = lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq_no)); reg_val |= LIO_CN23XX_PKT_OUTPUT_CTL_CENB; lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq_no), reg_val); } else { time_threshold = lio_cn23xx_pf_get_oq_ticks(oct, (uint32_t)LIO_GET_OQ_INTR_TIME_CFG(cn23xx->conf)); cnt_threshold = (uint32_t)LIO_GET_OQ_INTR_PKT_CFG(cn23xx->conf); lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no), ((time_threshold << 32 | cnt_threshold))); } } static int lio_cn23xx_pf_enable_io_queues(struct octeon_device *oct) { uint64_t reg_val; uint32_t ern, loop = BUSY_READING_REG_PF_LOOP_COUNT; uint32_t q_no, srn; srn = oct->sriov_info.pf_srn; ern = srn + oct->num_iqs; for (q_no = srn; q_no < ern; q_no++) { /* set the corresponding IQ IS_64B bit */ if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) { reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); reg_val = reg_val | LIO_CN23XX_PKT_INPUT_CTL_IS_64B; lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); } /* set the corresponding IQ ENB bit */ if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) { /* * IOQs are in reset by default in PEM2 mode, * clearing reset bit */ reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) { while ((reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) && !(reg_val & LIO_CN23XX_PKT_INPUT_CTL_QUIET) && loop) { reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); loop--; } if (!loop) { lio_dev_err(oct, "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n", q_no); return (-1); } reg_val = reg_val & ~LIO_CN23XX_PKT_INPUT_CTL_RST; lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) { lio_dev_err(oct, "clearing the reset failed for qno: %u\n", q_no); return (-1); } } reg_val = lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); reg_val = reg_val | LIO_CN23XX_PKT_INPUT_CTL_RING_ENB; lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); } } for (q_no = srn; q_no < ern; q_no++) { uint32_t reg_val; /* set the corresponding OQ ENB bit */ if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) { reg_val = lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no)); reg_val = reg_val | LIO_CN23XX_PKT_OUTPUT_CTL_RING_ENB; lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val); } } return (0); } static void lio_cn23xx_pf_disable_io_queues(struct octeon_device *oct) { volatile uint64_t d64; volatile uint32_t d32; int loop; unsigned int q_no; uint32_t ern, srn; srn = oct->sriov_info.pf_srn; ern = srn + oct->num_iqs; /* Disable Input Queues. 
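 * For each ring: clear RING_ENB and assert RST in IQ_PKT_CONTROL, poll
 * SLI_PKT_IOQ_RING_RST for the ring's bit, then flush the doorbell register.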
 */
	for (q_no = srn; q_no < ern; q_no++) {
		loop = lio_ms_to_ticks(1000);

		/* start the Reset for a particular ring */
		d64 = lio_read_csr64(oct,
				     LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 &= ~LIO_CN23XX_PKT_INPUT_CTL_RING_ENB;
		d64 |= LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);

		/*
		 * Wait until hardware indicates that the particular IQ
		 * is out of reset.
		 */
		d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
		while (!(d64 & BIT_ULL(q_no)) && loop--) {
			d64 = lio_read_csr64(oct,
					     LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
			lio_sleep_timeout(1);
			loop--;
		}

		/* Reset the doorbell register for this Input Queue. */
		lio_write_csr32(oct, LIO_CN23XX_SLI_IQ_DOORBELL(q_no),
				0xFFFFFFFF);
		while (((lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_DOORBELL(q_no))) !=
			0ULL) && loop--) {
			lio_sleep_timeout(1);
		}
	}

	/* Disable Output Queues. */
	for (q_no = srn; q_no < ern; q_no++) {
		loop = lio_ms_to_ticks(1000);

		/*
		 * Wait until hardware indicates that the particular IQ
		 * is out of reset. Note that SLI_PKT_RING_RST is
		 * common to both IQs and OQs.
		 */
		d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
		while (!(d64 & BIT_ULL(q_no)) && loop--) {
			d64 = lio_read_csr64(oct,
					     LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
			lio_sleep_timeout(1);
			loop--;
		}

		/* Reset the doorbell register for this Output Queue. */
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
				0xFFFFFFFF);
		while ((lio_read_csr64(oct,
				       LIO_CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) !=
			0ULL) && loop--) {
			lio_sleep_timeout(1);
		}

		/* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
		d32 = lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_SENT(q_no));
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_SENT(q_no), d32);
	}
}

static uint64_t
lio_cn23xx_pf_msix_interrupt_handler(void *dev)
{
	struct lio_ioq_vector	*ioq_vector = (struct lio_ioq_vector *)dev;
	struct octeon_device	*oct = ioq_vector->oct_dev;
	struct lio_droq	*droq = oct->droq[ioq_vector->droq_index];
	uint64_t	pkts_sent;
	uint64_t	ret = 0;

	if (droq == NULL) {
		lio_dev_err(oct, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
			    oct->pf_num, ioq_vector->ioq_num);
		return (0);
	}

	pkts_sent = lio_read_csr64(oct, droq->pkts_sent_reg);

	/*
	 * If our device has interrupted, then proceed. Also check
	 * for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
		return (ret);

	/* Write count reg in sli_pkt_cnts to clear these int. */
	if (pkts_sent & LIO_CN23XX_INTR_PO_INT)
		ret |= LIO_MSIX_PO_INT;

	if (pkts_sent & LIO_CN23XX_INTR_PI_INT)
		/* We will clear the count when we update the read_index. */
		ret |= LIO_MSIX_PI_INT;

	/*
	 * Never need to handle msix mbox intr for pf.
They arrive on the last * msix */ return (ret); } static void lio_cn23xx_pf_interrupt_handler(void *dev) { struct octeon_device *oct = (struct octeon_device *)dev; struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip; uint64_t intr64; lio_dev_dbg(oct, "In %s octeon_dev @ %p\n", __func__, oct); intr64 = lio_read_csr64(oct, cn23xx->intr_sum_reg64); oct->int_status = 0; if (intr64 & LIO_CN23XX_INTR_ERR) lio_dev_err(oct, "Error Intr: 0x%016llx\n", LIO_CAST64(intr64)); if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) { if (intr64 & LIO_CN23XX_INTR_PKT_DATA) oct->int_status |= LIO_DEV_INTR_PKT_DATA; } if (intr64 & (LIO_CN23XX_INTR_DMA0_FORCE)) oct->int_status |= LIO_DEV_INTR_DMA0_FORCE; if (intr64 & (LIO_CN23XX_INTR_DMA1_FORCE)) oct->int_status |= LIO_DEV_INTR_DMA1_FORCE; /* Clear the current interrupts */ lio_write_csr64(oct, cn23xx->intr_sum_reg64, intr64); } static void lio_cn23xx_pf_bar1_idx_setup(struct octeon_device *oct, uint64_t core_addr, uint32_t idx, int valid) { volatile uint64_t bar1; uint64_t reg_adr; if (!valid) { reg_adr = lio_pci_readq(oct, LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); bar1 = reg_adr; lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL), LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); reg_adr = lio_pci_readq(oct, LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); bar1 = reg_adr; return; } /* * The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores * bits <41:22> of the Core Addr */ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | LIO_PCI_BAR1_MASK), LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); bar1 = lio_pci_readq(oct, LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); } static void lio_cn23xx_pf_bar1_idx_write(struct octeon_device *oct, uint32_t idx, uint32_t mask) { lio_pci_writeq(oct, mask, LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); } static uint32_t lio_cn23xx_pf_bar1_idx_read(struct octeon_device *oct, uint32_t idx) { return ((uint32_t)lio_pci_readq(oct, LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx))); } /* always call with lock held */ static uint32_t lio_cn23xx_pf_update_read_index(struct lio_instr_queue *iq) { struct octeon_device *oct = iq->oct_dev; uint32_t new_idx; uint32_t last_done; uint32_t pkt_in_done = lio_read_csr32(oct, iq->inst_cnt_reg); last_done = pkt_in_done - iq->pkt_in_done; iq->pkt_in_done = pkt_in_done; /* * Modulo of the new index with the IQ size will give us * the new index. The iq->reset_instr_cnt is always zero for * cn23xx, so no extra adjustments are needed. */ new_idx = (iq->octeon_read_index + ((uint32_t)(last_done & LIO_CN23XX_PKT_IN_DONE_CNT_MASK))) % iq->max_count; return (new_idx); } static void lio_cn23xx_pf_enable_interrupt(struct octeon_device *oct, uint8_t intr_flag) { struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip; uint64_t intr_val = 0; /* Divide the single write to multiple writes based on the flag. 
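 * OCTEON_ALL_INTR writes the full cached mask (cn23xx->intr_mask64) in one
 * shot; OCTEON_OUTPUT_INTR does a read-modify-write that ORs in only the
 * LIO_CN23XX_INTR_PKT_DATA bit.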
*/ /* Enable Interrupt */ if (intr_flag == OCTEON_ALL_INTR) { lio_write_csr64(oct, cn23xx->intr_enb_reg64, cn23xx->intr_mask64); } else if (intr_flag & OCTEON_OUTPUT_INTR) { intr_val = lio_read_csr64(oct, cn23xx->intr_enb_reg64); intr_val |= LIO_CN23XX_INTR_PKT_DATA; lio_write_csr64(oct, cn23xx->intr_enb_reg64, intr_val); } } static void lio_cn23xx_pf_disable_interrupt(struct octeon_device *oct, uint8_t intr_flag) { struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip; uint64_t intr_val = 0; /* Disable Interrupts */ if (intr_flag == OCTEON_ALL_INTR) { lio_write_csr64(oct, cn23xx->intr_enb_reg64, 0); } else if (intr_flag & OCTEON_OUTPUT_INTR) { intr_val = lio_read_csr64(oct, cn23xx->intr_enb_reg64); intr_val &= ~LIO_CN23XX_INTR_PKT_DATA; lio_write_csr64(oct, cn23xx->intr_enb_reg64, intr_val); } } static void lio_cn23xx_pf_get_pcie_qlmport(struct octeon_device *oct) { oct->pcie_port = (lio_read_csr32(oct, LIO_CN23XX_SLI_MAC_NUMBER)) & 0xff; lio_dev_dbg(oct, "CN23xx uses PCIE Port %d\n", oct->pcie_port); } static void lio_cn23xx_pf_get_pf_num(struct octeon_device *oct) { uint32_t fdl_bit; /* Read Function Dependency Link reg to get the function number */ fdl_bit = lio_read_pci_cfg(oct, LIO_CN23XX_PCIE_SRIOV_FDL); oct->pf_num = ((fdl_bit >> LIO_CN23XX_PCIE_SRIOV_FDL_BIT_POS) & LIO_CN23XX_PCIE_SRIOV_FDL_MASK); } static void lio_cn23xx_pf_setup_reg_address(struct octeon_device *oct) { struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip; oct->reg_list.pci_win_wr_addr = LIO_CN23XX_SLI_WIN_WR_ADDR64; oct->reg_list.pci_win_rd_addr_hi = LIO_CN23XX_SLI_WIN_RD_ADDR_HI; oct->reg_list.pci_win_rd_addr_lo = LIO_CN23XX_SLI_WIN_RD_ADDR64; oct->reg_list.pci_win_rd_addr = LIO_CN23XX_SLI_WIN_RD_ADDR64; oct->reg_list.pci_win_wr_data_hi = LIO_CN23XX_SLI_WIN_WR_DATA_HI; oct->reg_list.pci_win_wr_data_lo = LIO_CN23XX_SLI_WIN_WR_DATA_LO; oct->reg_list.pci_win_wr_data = LIO_CN23XX_SLI_WIN_WR_DATA64; oct->reg_list.pci_win_rd_data = LIO_CN23XX_SLI_WIN_RD_DATA64; lio_cn23xx_pf_get_pcie_qlmport(oct); cn23xx->intr_mask64 = LIO_CN23XX_INTR_MASK; if (!oct->msix_on) cn23xx->intr_mask64 |= LIO_CN23XX_INTR_PKT_TIME; cn23xx->intr_sum_reg64 = LIO_CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); cn23xx->intr_enb_reg64 = LIO_CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); } static int lio_cn23xx_pf_sriov_config(struct octeon_device *oct) { struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip; uint32_t num_pf_rings, total_rings, max_rings; cn23xx->conf = (struct lio_config *)lio_get_config_info(oct, LIO_23XX); max_rings = LIO_CN23XX_PF_MAX_RINGS; if (oct->sriov_info.num_pf_rings) { num_pf_rings = oct->sriov_info.num_pf_rings; if (num_pf_rings > max_rings) { num_pf_rings = min(mp_ncpus, max_rings); lio_dev_warn(oct, "num_queues_per_pf requested %u is more than available rings (%u). 
Reducing to %u\n", oct->sriov_info.num_pf_rings, max_rings, num_pf_rings); } } else { #ifdef RSS num_pf_rings = min(rss_getnumbuckets(), mp_ncpus); #else num_pf_rings = min(mp_ncpus, max_rings); #endif } total_rings = num_pf_rings; oct->sriov_info.trs = total_rings; oct->sriov_info.pf_srn = total_rings - num_pf_rings; oct->sriov_info.num_pf_rings = num_pf_rings; lio_dev_dbg(oct, "trs:%d pf_srn:%d num_pf_rings:%d\n", oct->sriov_info.trs, oct->sriov_info.pf_srn, oct->sriov_info.num_pf_rings); return (0); } int lio_cn23xx_pf_setup_device(struct octeon_device *oct) { uint64_t BAR0, BAR1; uint32_t data32; data32 = lio_read_pci_cfg(oct, 0x10); BAR0 = (uint64_t)(data32 & ~0xf); data32 = lio_read_pci_cfg(oct, 0x14); BAR0 |= ((uint64_t)data32 << 32); data32 = lio_read_pci_cfg(oct, 0x18); BAR1 = (uint64_t)(data32 & ~0xf); data32 = lio_read_pci_cfg(oct, 0x1c); BAR1 |= ((uint64_t)data32 << 32); if (!BAR0 || !BAR1) { if (!BAR0) lio_dev_err(oct, "Device BAR0 unassigned\n"); if (!BAR1) lio_dev_err(oct, "Device BAR1 unassigned\n"); return (1); } if (lio_map_pci_barx(oct, 0)) return (1); if (lio_map_pci_barx(oct, 1)) { lio_dev_err(oct, "%s CN23XX BAR1 map failed\n", __func__); lio_unmap_pci_barx(oct, 0); return (1); } lio_cn23xx_pf_get_pf_num(oct); if (lio_cn23xx_pf_sriov_config(oct)) { lio_unmap_pci_barx(oct, 0); lio_unmap_pci_barx(oct, 1); return (1); } lio_write_csr64(oct, LIO_CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL); oct->fn_list.setup_iq_regs = lio_cn23xx_pf_setup_iq_regs; oct->fn_list.setup_oq_regs = lio_cn23xx_pf_setup_oq_regs; oct->fn_list.process_interrupt_regs = lio_cn23xx_pf_interrupt_handler; oct->fn_list.msix_interrupt_handler = lio_cn23xx_pf_msix_interrupt_handler; oct->fn_list.soft_reset = lio_cn23xx_pf_soft_reset; oct->fn_list.setup_device_regs = lio_cn23xx_pf_setup_device_regs; oct->fn_list.update_iq_read_idx = lio_cn23xx_pf_update_read_index; oct->fn_list.bar1_idx_setup = lio_cn23xx_pf_bar1_idx_setup; oct->fn_list.bar1_idx_write = lio_cn23xx_pf_bar1_idx_write; oct->fn_list.bar1_idx_read = lio_cn23xx_pf_bar1_idx_read; oct->fn_list.enable_interrupt = lio_cn23xx_pf_enable_interrupt; oct->fn_list.disable_interrupt = lio_cn23xx_pf_disable_interrupt; oct->fn_list.enable_io_queues = lio_cn23xx_pf_enable_io_queues; oct->fn_list.disable_io_queues = lio_cn23xx_pf_disable_io_queues; lio_cn23xx_pf_setup_reg_address(oct); oct->coproc_clock_rate = 1000000ULL * lio_cn23xx_pf_coprocessor_clock(oct); return (0); } int lio_cn23xx_pf_fw_loaded(struct octeon_device *oct) { uint64_t val; val = lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2); return ((val >> SCR2_BIT_FW_LOADED) & 1ULL); } Index: head/sys/dev/liquidio/base/lio_device.c =================================================================== --- head/sys/dev/liquidio/base/lio_device.c (revision 324993) +++ head/sys/dev/liquidio/base/lio_device.c (revision 324994) @@ -1,1062 +1,1062 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. 
nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_main.h" #include "lio_network.h" #include "cn23xx_pf_device.h" #include "lio_image.h" #include "lio_mem_ops.h" static struct lio_config default_cn23xx_conf = { .card_type = LIO_23XX, .card_name = LIO_23XX_NAME, /* IQ attributes */ .iq = { .max_iqs = LIO_CN23XX_CFG_IO_QUEUES, .pending_list_size = (LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS * LIO_CN23XX_CFG_IO_QUEUES), .instr_type = LIO_64BYTE_INSTR, .db_min = LIO_CN23XX_DB_MIN, .db_timeout = LIO_CN23XX_DB_TIMEOUT, .iq_intr_pkt = LIO_CN23XX_DEF_IQ_INTR_THRESHOLD, }, /* OQ attributes */ .oq = { .max_oqs = LIO_CN23XX_CFG_IO_QUEUES, .pkts_per_intr = LIO_CN23XX_OQ_PKTS_PER_INTR, .refill_threshold = LIO_CN23XX_OQ_REFIL_THRESHOLD, .oq_intr_pkt = LIO_CN23XX_OQ_INTR_PKT, .oq_intr_time = LIO_CN23XX_OQ_INTR_TIME, }, .num_nic_ports = LIO_CN23XX_DEFAULT_NUM_PORTS, .num_def_rx_descs = LIO_CN23XX_DEFAULT_OQ_DESCRIPTORS, .num_def_tx_descs = LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS, .def_rx_buf_size = LIO_CN23XX_OQ_BUF_SIZE, /* For ethernet interface 0: Port cfg Attributes */ .nic_if_cfg[0] = { /* Max Txqs: Half for each of the two ports :max_iq/2 */ .max_txqs = LIO_MAX_TXQS_PER_INTF, /* Actual configured value. Range could be: 1...max_txqs */ .num_txqs = LIO_DEF_TXQS_PER_INTF, /* Max Rxqs: Half for each of the two ports :max_oq/2 */ .max_rxqs = LIO_MAX_RXQS_PER_INTF, /* Actual configured value. Range could be: 1...max_rxqs */ .num_rxqs = LIO_DEF_RXQS_PER_INTF, /* Num of desc for rx rings */ .num_rx_descs = LIO_CN23XX_DEFAULT_OQ_DESCRIPTORS, /* Num of desc for tx rings */ .num_tx_descs = LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS, /* * Mbuf size, We need not change buf size even for Jumbo frames. * Octeon can send jumbo frames in 4 consecutive descriptors, */ .rx_buf_size = LIO_CN23XX_OQ_BUF_SIZE, .base_queue = LIO_BASE_QUEUE_NOT_REQUESTED, .gmx_port_id = 0, }, .nic_if_cfg[1] = { /* Max Txqs: Half for each of the two ports :max_iq/2 */ .max_txqs = LIO_MAX_TXQS_PER_INTF, /* Actual configured value. Range could be: 1...max_txqs */ .num_txqs = LIO_DEF_TXQS_PER_INTF, /* Max Rxqs: Half for each of the two ports :max_oq/2 */ .max_rxqs = LIO_MAX_RXQS_PER_INTF, /* Actual configured value. Range could be: 1...max_rxqs */ .num_rxqs = LIO_DEF_RXQS_PER_INTF, /* Num of desc for rx rings */ .num_rx_descs = LIO_CN23XX_DEFAULT_OQ_DESCRIPTORS, /* Num of desc for tx rings */ .num_tx_descs = LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS, /* * Mbuf size, We need not change buf size even for Jumbo frames. 
* Octeon can send jumbo frames in 4 consecutive descriptors, */ .rx_buf_size = LIO_CN23XX_OQ_BUF_SIZE, .base_queue = LIO_BASE_QUEUE_NOT_REQUESTED, .gmx_port_id = 1, }, .misc = { /* Host driver link query interval */ .oct_link_query_interval = 100, /* Octeon link query interval */ .host_link_query_interval = 500, .enable_sli_oq_bp = 0, /* Control queue group */ .ctrlq_grp = 1, } }; static struct lio_config_ptr { uint32_t conf_type; } oct_conf_info[LIO_MAX_DEVICES] = { { LIO_CFG_TYPE_DEFAULT, }, { LIO_CFG_TYPE_DEFAULT, }, { LIO_CFG_TYPE_DEFAULT, }, { LIO_CFG_TYPE_DEFAULT, }, }; static char lio_state_str[LIO_DEV_STATES + 1][32] = { "BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", "DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE", "INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", "INVALID" }; static char lio_app_str[LIO_DRV_APP_COUNT + 1][32] = {"BASE", "NIC", "UNKNOWN"}; static struct octeon_device *octeon_device[LIO_MAX_DEVICES]; static volatile int lio_adapter_refcounts[LIO_MAX_DEVICES]; static uint32_t octeon_device_count; /* locks device array (i.e. octeon_device[]) */ struct mtx octeon_devices_lock; static struct lio_core_setup core_setup[LIO_MAX_DEVICES]; static void oct_set_config_info(int oct_id, int conf_type) { if (conf_type < 0 || conf_type > (LIO_NUM_CFGS - 1)) conf_type = LIO_CFG_TYPE_DEFAULT; oct_conf_info[oct_id].conf_type = conf_type; } void lio_init_device_list(int conf_type) { int i; bzero(octeon_device, (sizeof(void *) * LIO_MAX_DEVICES)); for (i = 0; i < LIO_MAX_DEVICES; i++) oct_set_config_info(i, conf_type); mtx_init(&octeon_devices_lock, "octeon_devices_lock", NULL, MTX_DEF); } static void * __lio_retrieve_config_info(struct octeon_device *oct, uint16_t card_type) { void *ret = NULL; uint32_t oct_id = oct->octeon_id; switch (oct_conf_info[oct_id].conf_type) { case LIO_CFG_TYPE_DEFAULT: if (oct->chip_id == LIO_CN23XX_PF_VID) { ret = &default_cn23xx_conf; } break; default: break; } return (ret); } void * lio_get_config_info(struct octeon_device *oct, uint16_t card_type) { void *conf = NULL; conf = __lio_retrieve_config_info(oct, card_type); if (conf == NULL) return (NULL); return (conf); } char * lio_get_state_string(volatile int *state_ptr) { int32_t istate = (int32_t)atomic_load_acq_int(state_ptr); if (istate > LIO_DEV_STATES || istate < 0) return (lio_state_str[LIO_DEV_STATE_INVALID]); return (lio_state_str[istate]); } static char * lio_get_app_string(uint32_t app_mode) { if (app_mode <= LIO_DRV_APP_END) return (lio_app_str[app_mode - LIO_DRV_APP_START]); return (lio_app_str[LIO_DRV_INVALID_APP - LIO_DRV_APP_START]); } void lio_free_device_mem(struct octeon_device *oct) { int i; for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if ((oct->io_qmask.oq & BIT_ULL(i)) && (oct->droq[i])) free(oct->droq[i], M_DEVBUF); } for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if ((oct->io_qmask.iq & BIT_ULL(i)) && (oct->instr_queue[i])) free(oct->instr_queue[i], M_DEVBUF); } i = oct->octeon_id; free(oct->chip, M_DEVBUF); octeon_device[i] = NULL; octeon_device_count--; } static struct octeon_device * lio_allocate_device_mem(device_t device) { struct octeon_device *oct; uint32_t configsize = 0, pci_id = 0, size; uint8_t *buf = NULL; pci_id = pci_get_device(device); switch (pci_id) { case LIO_CN23XX_PF_VID: configsize = sizeof(struct lio_cn23xx_pf); break; default: device_printf(device, "Error: Unknown PCI Device: 0x%x\n", pci_id); 
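/* Unknown device id: the chip-private structure size is unknown, so the allocation is abandoned. */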
return (NULL); } if (configsize & 0x7) configsize += (8 - (configsize & 0x7)); size = configsize + (sizeof(struct lio_dispatch) * LIO_DISPATCH_LIST_SIZE); buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (buf == NULL) return (NULL); oct = (struct octeon_device *)device_get_softc(device); oct->chip = (void *)(buf); oct->dispatch.dlist = (struct lio_dispatch *)(buf + configsize); return (oct); } struct octeon_device * lio_allocate_device(device_t device) { struct octeon_device *oct = NULL; uint32_t oct_idx = 0; mtx_lock(&octeon_devices_lock); for (oct_idx = 0; oct_idx < LIO_MAX_DEVICES; oct_idx++) if (!octeon_device[oct_idx]) break; if (oct_idx < LIO_MAX_DEVICES) { oct = lio_allocate_device_mem(device); if (oct != NULL) { octeon_device_count++; octeon_device[oct_idx] = oct; } } mtx_unlock(&octeon_devices_lock); if (oct == NULL) return (NULL); mtx_init(&oct->pci_win_lock, "pci_win_lock", NULL, MTX_DEF); mtx_init(&oct->mem_access_lock, "mem_access_lock", NULL, MTX_DEF); oct->octeon_id = oct_idx; snprintf(oct->device_name, sizeof(oct->device_name), "%s%d", LIO_DRV_NAME, oct->octeon_id); return (oct); } /* * Register a device's bus location at initialization time. * @param oct - pointer to the octeon device structure. * @param bus - PCIe bus # * @param dev - PCIe device # * @param func - PCIe function # * @param is_pf - TRUE for PF, FALSE for VF * @return reference count of device's adapter */ int lio_register_device(struct octeon_device *oct, int bus, int dev, int func, int is_pf) { int idx, refcount; oct->loc.bus = bus; oct->loc.dev = dev; oct->loc.func = func; oct->adapter_refcount = &lio_adapter_refcounts[oct->octeon_id]; atomic_store_rel_int(oct->adapter_refcount, 0); mtx_lock(&octeon_devices_lock); for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) { if (octeon_device[idx] == NULL) { lio_dev_err(oct, "%s: Internal driver error, missing dev\n", __func__); mtx_unlock(&octeon_devices_lock); atomic_add_int(oct->adapter_refcount, 1); return (1); /* here, refcount is guaranteed to be 1 */ } /* if another device is at same bus/dev, use its refcounter */ if ((octeon_device[idx]->loc.bus == bus) && (octeon_device[idx]->loc.dev == dev)) { oct->adapter_refcount = octeon_device[idx]->adapter_refcount; break; } } mtx_unlock(&octeon_devices_lock); atomic_add_int(oct->adapter_refcount, 1); refcount = atomic_load_acq_int(oct->adapter_refcount); lio_dev_dbg(oct, "%s: %02x:%02x:%d refcount %u\n", __func__, oct->loc.bus, oct->loc.dev, oct->loc.func, refcount); return (refcount); } /* * Deregister a device at de-initialization time. * @param oct - pointer to the octeon device structure. 
* @return reference count of device's adapter */ int lio_deregister_device(struct octeon_device *oct) { int refcount; atomic_subtract_int(oct->adapter_refcount, 1); refcount = atomic_load_acq_int(oct->adapter_refcount); lio_dev_dbg(oct, "%s: %04d:%02d:%d refcount %u\n", __func__, oct->loc.bus, oct->loc.dev, oct->loc.func, refcount); return (refcount); } int lio_allocate_ioq_vector(struct octeon_device *oct) { struct lio_ioq_vector *ioq_vector; int i, cpu_num, num_ioqs = 0, size; if (LIO_CN23XX_PF(oct)) num_ioqs = oct->sriov_info.num_pf_rings; size = sizeof(struct lio_ioq_vector) * num_ioqs; oct->ioq_vector = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (oct->ioq_vector == NULL) return (1); for (i = 0; i < num_ioqs; i++) { ioq_vector = &oct->ioq_vector[i]; ioq_vector->oct_dev = oct; ioq_vector->droq_index = i; cpu_num = i % mp_ncpus; CPU_SETOF(cpu_num, &ioq_vector->affinity_mask); if (oct->chip_id == LIO_CN23XX_PF_VID) ioq_vector->ioq_num = i + oct->sriov_info.pf_srn; else ioq_vector->ioq_num = i; } return (0); } void lio_free_ioq_vector(struct octeon_device *oct) { free(oct->ioq_vector, M_DEVBUF); oct->ioq_vector = NULL; } /* this function is only for setting up the first queue */ int lio_setup_instr_queue0(struct octeon_device *oct) { union octeon_txpciq txpciq; uint32_t iq_no = 0; uint32_t num_descs = 0; if (LIO_CN23XX_PF(oct)) num_descs = LIO_GET_NUM_DEF_TX_DESCS_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)); oct->num_iqs = 0; oct->instr_queue[0]->q_index = 0; oct->instr_queue[0]->app_ctx = (void *)(size_t)0; oct->instr_queue[0]->ifidx = 0; txpciq.txpciq64 = 0; txpciq.s.q_no = iq_no; txpciq.s.pkind = oct->pfvf_hsword.pkind; txpciq.s.use_qpg = 0; txpciq.s.qpg = 0; if (lio_init_instr_queue(oct, txpciq, num_descs)) { /* prevent memory leak */ lio_delete_instr_queue(oct, 0); return (1); } oct->num_iqs++; return (0); } int lio_setup_output_queue0(struct octeon_device *oct) { uint32_t desc_size = 0, num_descs = 0, oq_no = 0; if (LIO_CN23XX_PF(oct)) { num_descs = LIO_GET_NUM_DEF_RX_DESCS_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)); desc_size = LIO_GET_DEF_RX_BUF_SIZE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)); } oct->num_oqs = 0; if (lio_init_droq(oct, oq_no, num_descs, desc_size, NULL)) { return (1); } oct->num_oqs++; return (0); } int lio_init_dispatch_list(struct octeon_device *oct) { uint32_t i; oct->dispatch.count = 0; for (i = 0; i < LIO_DISPATCH_LIST_SIZE; i++) { oct->dispatch.dlist[i].opcode = 0; STAILQ_INIT(&oct->dispatch.dlist[i].head); } mtx_init(&oct->dispatch.lock, "dispatch_lock", NULL, MTX_DEF); return (0); } void lio_delete_dispatch_list(struct octeon_device *oct) { struct lio_stailq_head freelist; struct lio_stailq_node *temp, *tmp2; uint32_t i; STAILQ_INIT(&freelist); mtx_lock(&oct->dispatch.lock); for (i = 0; i < LIO_DISPATCH_LIST_SIZE; i++) { struct lio_stailq_head *dispatch; dispatch = &oct->dispatch.dlist[i].head; while (!STAILQ_EMPTY(dispatch)) { temp = STAILQ_FIRST(dispatch); STAILQ_REMOVE_HEAD(&oct->dispatch.dlist[i].head, entries); STAILQ_INSERT_TAIL(&freelist, temp, entries); } oct->dispatch.dlist[i].opcode = 0; } oct->dispatch.count = 0; mtx_unlock(&oct->dispatch.lock); STAILQ_FOREACH_SAFE(temp, &freelist, entries, tmp2) { STAILQ_REMOVE_HEAD(&freelist, entries); free(temp, M_DEVBUF); } } lio_dispatch_fn_t lio_get_dispatch(struct octeon_device *octeon_dev, uint16_t opcode, uint16_t subcode) { struct lio_stailq_node *dispatch; lio_dispatch_fn_t fn = NULL; uint32_t idx; uint16_t combined_opcode = LIO_OPCODE_SUBCODE(opcode, subcode); idx = combined_opcode & LIO_OPCODE_MASK; 
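	/*
	 * First-level lookup: the masked opcode indexes dlist[] directly; on
	 * a hash collision, the per-slot STAILQ chain below is searched for
	 * an exact opcode match.
	 */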
mtx_lock(&octeon_dev->dispatch.lock); if (octeon_dev->dispatch.count == 0) { mtx_unlock(&octeon_dev->dispatch.lock); return (NULL); } if (!(octeon_dev->dispatch.dlist[idx].opcode)) { mtx_unlock(&octeon_dev->dispatch.lock); return (NULL); } if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { fn = octeon_dev->dispatch.dlist[idx].dispatch_fn; } else { STAILQ_FOREACH(dispatch, &octeon_dev->dispatch.dlist[idx].head, entries) { if (((struct lio_dispatch *)dispatch)->opcode == combined_opcode) { fn = ((struct lio_dispatch *) dispatch)->dispatch_fn; break; } } } mtx_unlock(&octeon_dev->dispatch.lock); return (fn); } /* * lio_register_dispatch_fn * Parameters: * octeon_id - id of the octeon device. * opcode - opcode for which driver should call the registered function * subcode - subcode for which driver should call the registered function * fn - The function to call when a packet with "opcode" arrives in * octeon output queues. * fn_arg - The argument to be passed when calling function "fn". * Description: * Registers a function and its argument to be called when a packet * arrives in Octeon output queues with "opcode". * Returns: * Success: 0 * Failure: 1 * Locks: * No locks are held. */ int lio_register_dispatch_fn(struct octeon_device *oct, uint16_t opcode, uint16_t subcode, lio_dispatch_fn_t fn, void *fn_arg) { lio_dispatch_fn_t pfn; uint32_t idx; uint16_t combined_opcode = LIO_OPCODE_SUBCODE(opcode, subcode); idx = combined_opcode & LIO_OPCODE_MASK; mtx_lock(&oct->dispatch.lock); /* Add dispatch function to first level of lookup table */ if (oct->dispatch.dlist[idx].opcode == 0) { oct->dispatch.dlist[idx].opcode = combined_opcode; oct->dispatch.dlist[idx].dispatch_fn = fn; oct->dispatch.dlist[idx].arg = fn_arg; oct->dispatch.count++; mtx_unlock(&oct->dispatch.lock); return (0); } mtx_unlock(&oct->dispatch.lock); /* * Check if there was a function already registered for this * opcode/subcode. */ pfn = lio_get_dispatch(oct, opcode, subcode); if (!pfn) { struct lio_dispatch *dispatch; lio_dev_dbg(oct, "Adding opcode to dispatch list linked list\n"); dispatch = (struct lio_dispatch *) malloc(sizeof(struct lio_dispatch), M_DEVBUF, M_NOWAIT | M_ZERO); if (dispatch == NULL) { lio_dev_err(oct, "No memory to add dispatch function\n"); return (1); } dispatch->opcode = combined_opcode; dispatch->dispatch_fn = fn; dispatch->arg = fn_arg; /* * Add dispatch function to linked list of fn ptrs * at the hashed index. */ mtx_lock(&oct->dispatch.lock); STAILQ_INSERT_HEAD(&oct->dispatch.dlist[idx].head, &dispatch->node, entries); oct->dispatch.count++; mtx_unlock(&oct->dispatch.lock); } else { lio_dev_err(oct, "Found previously registered dispatch fn for opcode/subcode: %x/%x\n", opcode, subcode); return (1); } return (0); } /* * lio_unregister_dispatch_fn * Parameters: * oct - octeon device * opcode - driver should unregister the function for this opcode * subcode - driver should unregister the function for this subcode * Description: * Unregister the function set for this opcode+subcode. * Returns: * Success: 0 * Failure: 1 * Locks: * No locks are held. 
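 * Example (illustrative call; the error handling shown is made up):
 *   if (lio_unregister_dispatch_fn(oct, opcode, subcode))
 *           lio_dev_err(oct, "dispatch fn was not registered\n");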
*/ int lio_unregister_dispatch_fn(struct octeon_device *oct, uint16_t opcode, uint16_t subcode) { struct lio_stailq_head *dispatch_head; struct lio_stailq_node *dispatch, *dfree = NULL, *tmp2; int retval = 0; uint32_t idx; uint16_t combined_opcode = LIO_OPCODE_SUBCODE(opcode, subcode); idx = combined_opcode & LIO_OPCODE_MASK; mtx_lock(&oct->dispatch.lock); if (oct->dispatch.count == 0) { mtx_unlock(&oct->dispatch.lock); lio_dev_err(oct, "No dispatch functions registered for this device\n"); return (1); } if (oct->dispatch.dlist[idx].opcode == combined_opcode) { dispatch_head = &oct->dispatch.dlist[idx].head; if (!STAILQ_EMPTY(dispatch_head)) { dispatch = STAILQ_FIRST(dispatch_head); oct->dispatch.dlist[idx].opcode = ((struct lio_dispatch *)dispatch)->opcode; oct->dispatch.dlist[idx].dispatch_fn = ((struct lio_dispatch *)dispatch)->dispatch_fn; oct->dispatch.dlist[idx].arg = ((struct lio_dispatch *)dispatch)->arg; STAILQ_REMOVE_HEAD(dispatch_head, entries); dfree = dispatch; } else { oct->dispatch.dlist[idx].opcode = 0; oct->dispatch.dlist[idx].dispatch_fn = NULL; oct->dispatch.dlist[idx].arg = NULL; } } else { retval = 1; STAILQ_FOREACH_SAFE(dispatch, &oct->dispatch.dlist[idx].head, entries, tmp2) { if (((struct lio_dispatch *)dispatch)->opcode == combined_opcode) { STAILQ_REMOVE(&oct->dispatch.dlist[idx].head, dispatch, lio_stailq_node, entries); dfree = dispatch; retval = 0; } } } if (!retval) oct->dispatch.count--; mtx_unlock(&oct->dispatch.lock); free(dfree, M_DEVBUF); return (retval); } int lio_core_drv_init(struct lio_recv_info *recv_info, void *buf) { struct octeon_device *oct = (struct octeon_device *)buf; struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt; struct lio_core_setup *cs = NULL; uint32_t i; uint32_t num_nic_ports = 0; char app_name[16]; if (LIO_CN23XX_PF(oct)) num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG( LIO_CHIP_CONF(oct, cn23xx_pf)); if (atomic_load_acq_int(&oct->status) >= LIO_DEV_RUNNING) { lio_dev_err(oct, "Received CORE OK when device state is 0x%x\n", atomic_load_acq_int(&oct->status)); goto core_drv_init_err; } strncpy(app_name, lio_get_app_string((uint32_t) recv_pkt->rh.r_core_drv_init.app_mode), sizeof(app_name) - 1); oct->app_mode = (uint32_t)recv_pkt->rh.r_core_drv_init.app_mode; if (recv_pkt->rh.r_core_drv_init.app_mode == LIO_DRV_NIC_APP) { oct->fw_info.max_nic_ports = (uint32_t)recv_pkt->rh.r_core_drv_init.max_nic_ports; oct->fw_info.num_gmx_ports = (uint32_t)recv_pkt->rh.r_core_drv_init.num_gmx_ports; } if (oct->fw_info.max_nic_ports < num_nic_ports) { lio_dev_err(oct, "Config has more ports than firmware allows (%d > %d).\n", num_nic_ports, oct->fw_info.max_nic_ports); goto core_drv_init_err; } oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags; oct->fw_info.app_mode = (uint32_t)recv_pkt->rh.r_core_drv_init.app_mode; oct->pfvf_hsword.app_mode = (uint32_t)recv_pkt->rh.r_core_drv_init.app_mode; oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind; for (i = 0; i < oct->num_iqs; i++) oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind; atomic_store_rel_int(&oct->status, LIO_DEV_CORE_OK); cs = &core_setup[oct->octeon_id]; if (recv_pkt->buffer_size[0] != (sizeof(*cs) + LIO_DROQ_INFO_SIZE)) { - lio_dev_dbg(oct, "Core setup bytes expected %lu found %d\n", - (uint32_t)sizeof(*cs) + LIO_DROQ_INFO_SIZE, + lio_dev_dbg(oct, "Core setup bytes expected %llu found %d\n", + LIO_CAST64(sizeof(*cs) + LIO_DROQ_INFO_SIZE), recv_pkt->buffer_size[0]); } memcpy(cs, recv_pkt->buffer_ptr[0]->m_data + LIO_DROQ_INFO_SIZE, sizeof(*cs)); 
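	/*
	 * Copy the board name/serial strings out before lio_swap_8B_data()
	 * below byte-swaps the whole structure; only the numeric fields are
	 * read after the swap.
	 */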
strncpy(oct->boardinfo.name, cs->boardname, LIO_BOARD_NAME); strncpy(oct->boardinfo.serial_number, cs->board_serial_number, LIO_SERIAL_NUM_LEN); lio_swap_8B_data((uint64_t *)cs, (sizeof(*cs) >> 3)); oct->boardinfo.major = cs->board_rev_major; oct->boardinfo.minor = cs->board_rev_minor; lio_dev_info(oct, "Running %s (%llu Hz)\n", app_name, LIO_CAST64(cs->corefreq)); core_drv_init_err: for (i = 0; i < recv_pkt->buffer_count; i++) lio_recv_buffer_free(recv_pkt->buffer_ptr[i]); lio_free_recv_info(recv_info); return (0); } int lio_get_tx_qsize(struct octeon_device *oct, uint32_t q_no) { if ((oct != NULL) && (q_no < (uint32_t)LIO_MAX_INSTR_QUEUES(oct)) && (oct->io_qmask.iq & BIT_ULL(q_no))) return (oct->instr_queue[q_no]->max_count); return (-1); } int lio_get_rx_qsize(struct octeon_device *oct, uint32_t q_no) { if ((oct != NULL) && (q_no < (uint32_t)LIO_MAX_OUTPUT_QUEUES(oct)) && (oct->io_qmask.oq & BIT_ULL(q_no))) return (oct->droq[q_no]->max_count); return (-1); } /* Returns the host firmware handshake OCTEON specific configuration */ struct lio_config * lio_get_conf(struct octeon_device *oct) { struct lio_config *default_oct_conf = NULL; /* * check the OCTEON Device model & return the corresponding octeon * configuration */ if (LIO_CN23XX_PF(oct)) { default_oct_conf = (struct lio_config *)( LIO_CHIP_CONF(oct, cn23xx_pf)); } return (default_oct_conf); } /* * Get the octeon device pointer. * @param octeon_id - The id for which the octeon device pointer is required. * @return Success: Octeon device pointer. * @return Failure: NULL. */ struct octeon_device * lio_get_device(uint32_t octeon_id) { if (octeon_id >= LIO_MAX_DEVICES) return (NULL); else return (octeon_device[octeon_id]); } uint64_t lio_pci_readq(struct octeon_device *oct, uint64_t addr) { uint64_t val64; volatile uint32_t val32, addrhi; mtx_lock(&oct->pci_win_lock); /* * The windowed read happens when the LSB of the addr is written. * So write MSB first */ addrhi = (addr >> 32); if (oct->chip_id == LIO_CN23XX_PF_VID) addrhi |= 0x00060000; lio_write_csr32(oct, oct->reg_list.pci_win_rd_addr_hi, addrhi); /* Read back to preserve ordering of writes */ val32 = lio_read_csr32(oct, oct->reg_list.pci_win_rd_addr_hi); lio_write_csr32(oct, oct->reg_list.pci_win_rd_addr_lo, addr & 0xffffffff); val32 = lio_read_csr32(oct, oct->reg_list.pci_win_rd_addr_lo); val64 = lio_read_csr64(oct, oct->reg_list.pci_win_rd_data); mtx_unlock(&oct->pci_win_lock); return (val64); } void lio_pci_writeq(struct octeon_device *oct, uint64_t val, uint64_t addr) { volatile uint32_t val32; mtx_lock(&oct->pci_win_lock); lio_write_csr64(oct, oct->reg_list.pci_win_wr_addr, addr); /* The write happens when the LSB is written. So write MSB first. */ lio_write_csr32(oct, oct->reg_list.pci_win_wr_data_hi, val >> 32); /* Read the MSB to ensure ordering of writes. */ val32 = lio_read_csr32(oct, oct->reg_list.pci_win_wr_data_hi); lio_write_csr32(oct, oct->reg_list.pci_win_wr_data_lo, val & 0xffffffff); mtx_unlock(&oct->pci_win_lock); } int lio_mem_access_ok(struct octeon_device *oct) { uint64_t access_okay = 0; uint64_t lmc0_reset_ctl; /* Check to make sure a DDR interface is enabled */ if (LIO_CN23XX_PF(oct)) { lmc0_reset_ctl = lio_pci_readq(oct, LIO_CN23XX_LMC0_RESET_CTL); access_okay = (lmc0_reset_ctl & LIO_CN23XX_LMC0_RESET_CTL_DDR3RST_MASK); } return (access_okay ? 
0 : 1); } int lio_wait_for_ddr_init(struct octeon_device *oct, unsigned long *timeout) { int ret = 1; uint32_t ms; if (timeout == NULL) return (ret); for (ms = 0; ret && ((*timeout == 0) || (ms <= *timeout)); ms += 100) { ret = lio_mem_access_ok(oct); /* wait 100 ms */ if (ret) lio_sleep_timeout(100); } return (ret); } /* * Get the octeon id assigned to the octeon device passed as argument. * This function is exported to other modules. * @param dev - octeon device pointer passed as a void *. * @return octeon device id */ int lio_get_device_id(void *dev) { struct octeon_device *octeon_dev = (struct octeon_device *)dev; uint32_t i; for (i = 0; i < LIO_MAX_DEVICES; i++) if (octeon_device[i] == octeon_dev) return (octeon_dev->octeon_id); return (-1); } void lio_enable_irq(struct lio_droq *droq, struct lio_instr_queue *iq) { struct octeon_device *oct = NULL; uint64_t instr_cnt; uint32_t pkts_pend; /* the whole thing needs to be atomic, ideally */ if (droq != NULL) { oct = droq->oct_dev; pkts_pend = atomic_load_acq_int(&droq->pkts_pending); mtx_lock(&droq->lock); lio_write_csr32(oct, droq->pkts_sent_reg, droq->pkt_count - pkts_pend); droq->pkt_count = pkts_pend; /* this write needs to be flushed before we release the lock */ __compiler_membar(); mtx_unlock(&droq->lock); } if (iq != NULL) { oct = iq->oct_dev; mtx_lock(&iq->lock); lio_write_csr32(oct, iq->inst_cnt_reg, iq->pkt_in_done); iq->pkt_in_done = 0; /* this write needs to be flushed before we release the lock */ __compiler_membar(); mtx_unlock(&iq->lock); } /* * Implementation note: * * SLI_PKT(x)_CNTS[RESEND] is written separately so that if an interrupt * DOES occur as a result of RESEND, the DROQ lock will NOT be held. * * Write resend. Writing RESEND in SLI_PKTX_CNTS should be enough * to trigger tx interrupts as well, if they are pending. */ if ((oct != NULL) && (LIO_CN23XX_PF(oct))) { if (droq != NULL) lio_write_csr64(oct, droq->pkts_sent_reg, LIO_CN23XX_INTR_RESEND); /* we race with firmrware here. */ /* read and write the IN_DONE_CNTS */ else if (iq != NULL) { instr_cnt = lio_read_csr64(oct, iq->inst_cnt_reg); lio_write_csr64(oct, iq->inst_cnt_reg, ((instr_cnt & 0xFFFFFFFF00000000ULL) | LIO_CN23XX_INTR_RESEND)); } } } Index: head/sys/dev/liquidio/base/lio_device.h =================================================================== --- head/sys/dev/liquidio/base/lio_device.h (revision 324993) +++ head/sys/dev/liquidio/base/lio_device.h (revision 324994) @@ -1,888 +1,898 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /* * \brief Host Driver: This file defines the octeon device structure. */ #ifndef _LIO_DEVICE_H_ #define _LIO_DEVICE_H_ #include /* for BYTE_ORDER */ /* PCI VendorId Device Id */ #define LIO_CN23XX_PF_PCIID 0x9702177d /* * Driver identifies chips by these Ids, created by clubbing together * DeviceId+RevisionId; Where Revision Id is not used to distinguish * between chips, a value of 0 is used for revision id. */ #define LIO_CN23XX_PF_VID 0x9702 #define LIO_CN2350_10G_SUBDEVICE 0x03 #define LIO_CN2350_10G_SUBDEVICE1 0x04 #define LIO_CN2360_10G_SUBDEVICE 0x05 #define LIO_CN2350_25G_SUBDEVICE 0x07 #define LIO_CN2360_25G_SUBDEVICE 0x06 /* Endian-swap modes supported by Octeon. */ enum lio_pci_swap_mode { LIO_PCI_PASSTHROUGH = 0, LIO_PCI_SWAP_64BIT = 1, LIO_PCI_SWAP_32BIT = 2, LIO_PCI_LW_SWAP_32BIT = 3 }; enum { LIO_CFG_TYPE_DEFAULT = 0, LIO_NUM_CFGS, }; #define OCTEON_OUTPUT_INTR (2) #define OCTEON_ALL_INTR 0xff /*--------------- PCI BAR1 index registers -------------*/ /* BAR1 Mask */ #define LIO_PCI_BAR1_ENABLE_CA 1 #define LIO_PCI_BAR1_ENDIAN_MODE LIO_PCI_SWAP_64BIT #define LIO_PCI_BAR1_ENTRY_VALID 1 #define LIO_PCI_BAR1_MASK ((LIO_PCI_BAR1_ENABLE_CA << 3) | \ (LIO_PCI_BAR1_ENDIAN_MODE << 1) | \ LIO_PCI_BAR1_ENTRY_VALID) /* * Octeon Device state. * Each octeon device goes through each of these states * as it is initialized. */ #define LIO_DEV_BEGIN_STATE 0x0 #define LIO_DEV_PCI_ENABLE_DONE 0x1 #define LIO_DEV_PCI_MAP_DONE 0x2 #define LIO_DEV_DISPATCH_INIT_DONE 0x3 #define LIO_DEV_INSTR_QUEUE_INIT_DONE 0x4 #define LIO_DEV_SC_BUFF_POOL_INIT_DONE 0x5 #define LIO_DEV_MSIX_ALLOC_VECTOR_DONE 0x6 #define LIO_DEV_RESP_LIST_INIT_DONE 0x7 #define LIO_DEV_DROQ_INIT_DONE 0x8 #define LIO_DEV_INTR_SET_DONE 0xa #define LIO_DEV_IO_QUEUES_DONE 0xb #define LIO_DEV_CONSOLE_INIT_DONE 0xc #define LIO_DEV_HOST_OK 0xd #define LIO_DEV_CORE_OK 0xe #define LIO_DEV_RUNNING 0xf #define LIO_DEV_IN_RESET 0x10 #define LIO_DEV_STATE_INVALID 0x11 #define LIO_DEV_STATES LIO_DEV_STATE_INVALID /* * Octeon Device interrupts * These interrupt bits are set in int_status filed of * octeon_device structure */ #define LIO_DEV_INTR_DMA0_FORCE 0x01 #define LIO_DEV_INTR_DMA1_FORCE 0x02 #define LIO_DEV_INTR_PKT_DATA 0x04 #define LIO_RESET_MSECS (3000) /*---------------------------DISPATCH LIST-------------------------------*/ /* * The dispatch list entry. * The driver keeps a record of functions registered for each * response header opcode in this structure. Since the opcode is * hashed to index into the driver's list, more than one opcode * can hash to the same entry, in which case the list field points * to a linked list with the other entries. 
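 * For example (mask width illustrative): if LIO_OPCODE_MASK spans the low
 * six bits, two combined opcodes that differ only in higher bits index the
 * same dlist[] slot, and the later registration is chained off 'head'.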
*/ struct lio_dispatch { /* Singly-linked tail queue node for this entry */ struct lio_stailq_node node; /* Singly-linked tail queue head for this entry */ struct lio_stailq_head head; /* The opcode for which the dispatch function & arg should be used */ uint16_t opcode; /* The function to be called for a packet received by the driver */ lio_dispatch_fn_t dispatch_fn; /* * The application specified argument to be passed to the above * function along with the received packet */ void *arg; }; /* The dispatch list structure. */ struct lio_dispatch_list { /* access to dispatch list must be atomic */ struct mtx lock; /* Count of dispatch functions currently registered */ uint32_t count; /* The list of dispatch functions */ struct lio_dispatch *dlist; }; /*----------------------- THE OCTEON DEVICE ---------------------------*/ #define LIO_MEM_REGIONS 3 /* * PCI address space information. * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of * Octeon gets mapped to different physical address spaces in * the kernel. */ struct lio_mem_bus_space { struct resource *pci_mem; bus_space_tag_t tag; bus_space_handle_t handle; }; #define LIO_MAX_MAPS 32 struct lio_io_enable { uint64_t iq; uint64_t oq; uint64_t iq64B; }; struct lio_reg_list { uint32_t pci_win_wr_addr; uint32_t pci_win_rd_addr_hi; uint32_t pci_win_rd_addr_lo; uint32_t pci_win_rd_addr; uint32_t pci_win_wr_data_hi; uint32_t pci_win_wr_data_lo; uint32_t pci_win_wr_data; uint32_t pci_win_rd_data; }; #define LIO_MAX_CONSOLE_READ_BYTES 512 typedef int (*octeon_console_print_fn)(struct octeon_device *oct, uint32_t num, char *pre, char *suf); struct lio_console { uint32_t active; uint32_t waiting; uint64_t addr; uint32_t buffer_size; uint64_t input_base_addr; uint64_t output_base_addr; octeon_console_print_fn print; char leftover[LIO_MAX_CONSOLE_READ_BYTES]; }; struct lio_board_info { char name[LIO_BOARD_NAME]; char serial_number[LIO_SERIAL_NUM_LEN]; uint64_t major; uint64_t minor; }; struct lio_fn_list { void (*setup_iq_regs) (struct octeon_device *, uint32_t); void (*setup_oq_regs) (struct octeon_device *, uint32_t); void (*process_interrupt_regs) (void *); uint64_t (*msix_interrupt_handler) (void *); int (*soft_reset) (struct octeon_device *); int (*setup_device_regs) (struct octeon_device *); void (*bar1_idx_setup) (struct octeon_device *, uint64_t, uint32_t, int); void (*bar1_idx_write) (struct octeon_device *, uint32_t, uint32_t); uint32_t (*bar1_idx_read) (struct octeon_device *, uint32_t); uint32_t (*update_iq_read_idx) (struct lio_instr_queue *); void (*enable_interrupt) (struct octeon_device *, uint8_t); void (*disable_interrupt) (struct octeon_device *, uint8_t); int (*enable_io_queues) (struct octeon_device *); void (*disable_io_queues) (struct octeon_device *); }; /* Must be multiple of 8, changing breaks ABI */ #define LIO_BOOTMEM_NAME_LEN 128 /* * Structure for named memory blocks * Number of descriptors * available can be changed without affecting compatibility, * but name length changes require a bump in the bootmem * descriptor version * Note: This structure must be naturally 64 bit aligned, as a single * memory image will be used by both 32 and 64 bit programs. 
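 *
 * (A quick size check against the definition below: two uint64_t
 * fields plus name[LIO_BOOTMEM_NAME_LEN] give 8 + 8 + 128 = 144 bytes,
 * a multiple of 8, so the natural 64-bit alignment holds for both the
 * 32-bit and the 64-bit view.)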
*/ struct cvmx_bootmem_named_block_desc { /* Base address of named block */ uint64_t base_addr; /* Size actually allocated for named block */ uint64_t size; /* name of named block */ char name[LIO_BOOTMEM_NAME_LEN]; }; struct lio_fw_info { uint32_t max_nic_ports; /* max nic ports for the device */ uint32_t num_gmx_ports; /* num gmx ports */ uint64_t app_cap_flags; /* firmware cap flags */ /* * The core application is running in this mode. * See octeon-drv-opcodes.h for values. */ uint32_t app_mode; char lio_firmware_version[32]; }; struct lio_callout { struct callout timer; void *ctxptr; uint64_t ctxul; }; #define LIO_NIC_STARTER_TIMEOUT 30000 /* 30000ms (30s) */ struct lio_tq { struct taskqueue *tq; struct timeout_task work; void *ctxptr; uint64_t ctxul; }; struct lio_if_props { /* * Each interface in the Octeon device has a network * device pointer (used for OS specific calls). */ int rx_on; int gmxport; struct ifnet *ifp; }; #define LIO_MSIX_PO_INT 0x1 #define LIO_MSIX_PI_INT 0x2 struct lio_pf_vf_hs_word { #if BYTE_ORDER == LITTLE_ENDIAN /* PKIND value assigned for the DPI interface */ uint64_t pkind:8; /* OCTEON core clock multiplier */ uint64_t core_tics_per_us:16; /* OCTEON coprocessor clock multiplier */ uint64_t coproc_tics_per_us:16; /* app that is currently running on OCTEON */ uint64_t app_mode:8; /* RESERVED */ uint64_t reserved:16; #else /* BYTE_ORDER != LITTLE_ENDIAN */ /* RESERVED */ uint64_t reserved:16; /* app that is currently running on OCTEON */ uint64_t app_mode:8; /* OCTEON coprocessor clock multiplier */ uint64_t coproc_tics_per_us:16; /* OCTEON core clock multiplier */ uint64_t core_tics_per_us:16; /* PKIND value assigned for the DPI interface */ uint64_t pkind:8; #endif /* BYTE_ORDER == LITTLE_ENDIAN */ }; struct lio_sriov_info { /* Actual rings left for PF device */ uint32_t num_pf_rings; /* SRN of PF usable IO queues */ uint32_t pf_srn; /* total pf rings */ uint32_t trs; }; struct lio_ioq_vector { struct octeon_device *oct_dev; struct resource *msix_res; void *tag; int droq_index; int vector; cpuset_t affinity_mask; uint32_t ioq_num; }; /* * The Octeon device. * Each Octeon device has this structure to represent all its * components. */ struct octeon_device { /* Lock for PCI window configuration accesses */ struct mtx pci_win_lock; /* Lock for memory accesses */ struct mtx mem_access_lock; /* PCI device pointer */ device_t device; /* Chip specific information. */ void *chip; /* Number of interfaces detected in this octeon device. */ uint32_t ifcount; struct lio_if_props props; /* Octeon Chip type. */ uint16_t chip_id; uint16_t rev_id; uint16_t subdevice_id; uint16_t pf_num; /* This device's id - set by the driver. */ uint32_t octeon_id; /* This device's PCIe port used for traffic.
*/ uint16_t pcie_port; uint16_t flags; #define LIO_FLAG_MSIX_ENABLED (uint32_t)(1 << 2) /* The state of this device */ volatile int status; /* memory mapped io range */ struct lio_mem_bus_space mem_bus_space[LIO_MEM_REGIONS]; struct lio_reg_list reg_list; struct lio_fn_list fn_list; struct lio_board_info boardinfo; uint32_t num_iqs; /* The pool containing pre-allocated buffers used for soft commands */ struct lio_sc_buffer_pool sc_buf_pool; /* The input instruction queues */ struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES]; /* The doubly-linked list of instruction responses */ struct lio_response_list response_list[LIO_MAX_RESPONSE_LISTS]; uint32_t num_oqs; /* The DROQ output queues */ struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES]; struct lio_io_enable io_qmask; /* List of dispatch functions */ struct lio_dispatch_list dispatch; uint32_t int_status; /* Physical location of the cvmx_bootmem_desc_t in octeon memory */ uint64_t bootmem_desc_addr; /* * Placeholder memory for named blocks. * Assumes single-threaded access */ struct cvmx_bootmem_named_block_desc bootmem_named_block_desc; /* Address of consoles descriptor */ uint64_t console_desc_addr; /* Number of consoles available. 0 means they are inaccessible */ uint32_t num_consoles; /* Console caches */ struct lio_console console[LIO_MAX_MAPS]; /* Console named block info */ struct { uint64_t dram_region_base; int bar1_index; } console_nb_info; /* Coprocessor clock rate. */ uint64_t coproc_clock_rate; /* * The core application is running in this mode. See lio_common.h * for values. */ uint32_t app_mode; struct lio_fw_info fw_info; /* The name given to this device. */ char device_name[32]; struct lio_tq dma_comp_tq; /* Lock for dma response list */ struct mtx cmd_resp_wqlock; uint32_t cmd_resp_state; struct lio_tq check_db_tq[LIO_MAX_POSSIBLE_INSTR_QUEUES]; struct lio_callout console_timer[LIO_MAX_MAPS]; int num_msix_irqs; /* For PF, there is one non-ioq interrupt handler */ struct resource *msix_res; int aux_vector; void *tag; #define INTRNAMSIZ (32) #define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ) struct lio_sriov_info sriov_info; struct lio_pf_vf_hs_word pfvf_hsword; int msix_on; /* IOq information of its corresponding MSI-X interrupt. */ struct lio_ioq_vector *ioq_vector; int rx_pause; int tx_pause; /* TX/RX process pkt budget */ uint32_t rx_budget; uint32_t tx_budget; struct octeon_link_stats link_stats; /* statistics from firmware */ struct proc *watchdog_task; volatile bool cores_crashed; uint32_t rx_coalesce_usecs; uint32_t rx_max_coalesced_frames; uint32_t tx_max_coalesced_frames; #define OCTEON_UBOOT_BUFFER_SIZE 512 char uboot_version[OCTEON_UBOOT_BUFFER_SIZE]; int uboot_len; int uboot_sidx, uboot_eidx; struct { int bus; int dev; int func; } loc; volatile int *adapter_refcount; /* reference count of adapter */ }; #define LIO_DRV_ONLINE 1 #define LIO_DRV_OFFLINE 2 #define LIO_CN23XX_PF(oct) ((oct)->chip_id == LIO_CN23XX_PF_VID) #define LIO_CHIP_CONF(oct, TYPE) \ (((struct lio_ ## TYPE *)((oct)->chip))->conf) #define MAX_IO_PENDING_PKT_COUNT 100 /*------------------ Function Prototypes ----------------------*/ /* Initialize device list memory */ void lio_init_device_list(int conf_type); /* Free memory for Input and Output queue structures for an octeon device */ void lio_free_device_mem(struct octeon_device *oct); /* * Look up a free entry in the octeon_device table and allocate resources * for the octeon_device structure for an octeon device. Called at init * time.
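 *
 * A minimal attach-time sketch; the locals (dev, bus, slot, func) are
 * hypothetical and error handling is elided:
 *
 *	struct octeon_device *oct;
 *
 *	oct = lio_allocate_device(dev);
 *	(void)lio_register_device(oct, bus, slot, func, 1);	/* 1: PF */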
*/ struct octeon_device *lio_allocate_device(device_t device); /* * Register a device's bus location at initialization time. * @param oct - pointer to the octeon device structure. * @param bus - PCIe bus # * @param dev - PCIe device # * @param func - PCIe function # * @param is_pf - TRUE for PF, FALSE for VF * @return reference count of device's adapter */ int lio_register_device(struct octeon_device *oct, int bus, int dev, int func, int is_pf); /* * Deregister a device at de-initialization time. * @param oct - pointer to the octeon device structure. * @return reference count of device's adapter */ int lio_deregister_device(struct octeon_device *oct); /* * Initialize the driver's dispatch list which is a mix of a hash table * and a linked list. This is done at driver load time. * @param octeon_dev - pointer to the octeon device structure. * @return 0 on success, else -ve error value */ int lio_init_dispatch_list(struct octeon_device *octeon_dev); /* * Delete the driver's dispatch list and all registered entries. * This is done at driver unload time. * @param octeon_dev - pointer to the octeon device structure. */ void lio_delete_dispatch_list(struct octeon_device *octeon_dev); /* * Initialize the core device fields with the info returned by the FW. * @param recv_info - Receive info structure * @param buf - Receive buffer */ int lio_core_drv_init(struct lio_recv_info *recv_info, void *buf); /* * Gets the dispatch function registered to receive packets with a * given opcode/subcode. * @param octeon_dev - the octeon device pointer. * @param opcode - the opcode for which the dispatch function * is to be checked. * @param subcode - the subcode for which the dispatch function * is to be checked. * * @return Success: lio_dispatch_fn_t (dispatch function pointer) * @return Failure: NULL * * Looks up the dispatch list to get the dispatch function for a * given opcode. */ lio_dispatch_fn_t lio_get_dispatch(struct octeon_device *octeon_dev, uint16_t opcode, uint16_t subcode); /* * Get the octeon device pointer. * @param octeon_id - The id for which the octeon device pointer is required. * @return Success: Octeon device pointer. * @return Failure: NULL. */ struct octeon_device *lio_get_device(uint32_t octeon_id); /* * Get the octeon id assigned to the octeon device passed as argument. * This function is exported to other modules. * @param dev - octeon device pointer passed as a void *. * @return octeon device id */ int lio_get_device_id(void *dev); static inline uint16_t OCTEON_MAJOR_REV(struct octeon_device *oct) { uint16_t rev = (oct->rev_id & 0xC) >> 2; return ((rev == 0) ? 1 : rev); } static inline uint16_t OCTEON_MINOR_REV(struct octeon_device *oct) { return (oct->rev_id & 0x3); } /* * Read windowed register. * @param oct - pointer to the Octeon device. * @param addr - Address of the register to read. * * This routine is called to read from the indirectly accessed * Octeon registers that are visible through a PCI BAR0 mapped window * register. * @return - 64 bit value read from the register. */ uint64_t lio_pci_readq(struct octeon_device *oct, uint64_t addr); /* * Write windowed register. * @param oct - pointer to the Octeon device. * @param val - Value to write * @param addr - Address of the register to write * * This routine is called to write to the indirectly accessed * Octeon registers that are visible through a PCI BAR0 mapped window * register. * @return Nothing.
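 *
 * Hedged usage sketch of a windowed read-modify-write;
 * EXAMPLE_WINDOWED_REG is an illustrative placeholder, not a real
 * CN23XX register offset:
 *
 *	uint64_t v = lio_pci_readq(oct, EXAMPLE_WINDOWED_REG);
 *	lio_pci_writeq(oct, v | 0x1ULL, EXAMPLE_WINDOWED_REG);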
*/ void lio_pci_writeq(struct octeon_device *oct, uint64_t val, uint64_t addr); /* * Checks if memory access is okay * * @param oct which octeon to send to * @return Zero on success, negative on failure. */ int lio_mem_access_ok(struct octeon_device *oct); /* * Waits for DDR initialization. * * @param oct which octeon to send to * @param timeout_in_ms pointer to how long to wait until DDR is initialized * in ms. * If contents are 0, it waits until contents are non-zero * before starting to check. * @return Zero on success, negative on failure. */ int lio_wait_for_ddr_init(struct octeon_device *oct, unsigned long *timeout_in_ms); /* * Wait for u-boot to boot and be waiting for a command. * * @param wait_time_hundredths * Maximum time to wait * * @return Zero on success, negative on failure. */ int lio_wait_for_bootloader(struct octeon_device *oct, uint32_t wait_time_hundredths); /* * Initialize console access * * @param oct which octeon to initialize * @return Zero on success, negative on failure. */ int lio_init_consoles(struct octeon_device *oct); /* * Adds access to a console to the device. * * @param oct: which octeon to add to * @param console_num: which console * @param dbg_enb: ptr to debug enablement string, one of: * * NULL for no debug output (i.e. disabled) * * empty string enables debug output (via default method) * * specific string to enable debug console output * * @return Zero on success, negative on failure. */ int lio_add_console(struct octeon_device *oct, uint32_t console_num, char *dbg_enb); /* write or read from a console */ int lio_console_write(struct octeon_device *oct, uint32_t console_num, char *buffer, uint32_t write_request_size, uint32_t flags); /* Removes all attached consoles. */ void lio_remove_consoles(struct octeon_device *oct); /* * Send a string to u-boot on console 0 as a command. * * @param oct which octeon to send to * @param cmd_str String to send * @param wait_hundredths Time to wait for u-boot to accept the command. * * @return Zero on success, negative on failure. */ int lio_console_send_cmd(struct octeon_device *oct, char *cmd_str, uint32_t wait_hundredths); /* * Parses, validates, and downloads firmware, then boots associated cores. * @param oct which octeon to download firmware to * @param data - The complete firmware file image * @param size - The size of the data * * @return 0 if success. * -EINVAL if file is incompatible or badly formatted. * -ENODEV if no handler was found for the application type or an * invalid octeon id was passed. */ int lio_download_firmware(struct octeon_device *oct, const uint8_t *data, size_t size); char *lio_get_state_string(volatile int *state_ptr); /* * Sets up instruction queues for the device * @param oct which octeon to setup * * @return 0 if success. 1 if fails */ int lio_setup_instr_queue0(struct octeon_device *oct); /* * Sets up output queues for the device * @param oct which octeon to setup * * @return 0 if success.
1 if fails */ int lio_setup_output_queue0(struct octeon_device *oct); int lio_get_tx_qsize(struct octeon_device *oct, uint32_t q_no); int lio_get_rx_qsize(struct octeon_device *oct, uint32_t q_no); /* * Retrieve the config for the device * @param oct which octeon * @param card_type type of card * * @returns pointer to configuration */ void *lio_get_config_info(struct octeon_device *oct, uint16_t card_type); /* * Gets the octeon device configuration * @return - pointer to the octeon configuration structure */ struct lio_config *lio_get_conf(struct octeon_device *oct); void lio_free_ioq_vector(struct octeon_device *oct); int lio_allocate_ioq_vector(struct octeon_device *oct); void lio_enable_irq(struct lio_droq *droq, struct lio_instr_queue *iq); static inline uint32_t lio_read_pci_cfg(struct octeon_device *oct, uint32_t reg) { return (pci_read_config(oct->device, reg, 4)); } static inline void lio_write_pci_cfg(struct octeon_device *oct, uint32_t reg, uint32_t value) { pci_write_config(oct->device, reg, value, 4); } static inline uint8_t lio_read_csr8(struct octeon_device *oct, uint32_t reg) { return (bus_space_read_1(oct->mem_bus_space[0].tag, oct->mem_bus_space[0].handle, reg)); } static inline void lio_write_csr8(struct octeon_device *oct, uint32_t reg, uint8_t val) { bus_space_write_1(oct->mem_bus_space[0].tag, oct->mem_bus_space[0].handle, reg, val); } static inline uint16_t lio_read_csr16(struct octeon_device *oct, uint32_t reg) { return (bus_space_read_2(oct->mem_bus_space[0].tag, oct->mem_bus_space[0].handle, reg)); } static inline void lio_write_csr16(struct octeon_device *oct, uint32_t reg, uint16_t val) { bus_space_write_2(oct->mem_bus_space[0].tag, oct->mem_bus_space[0].handle, reg, val); } static inline uint32_t lio_read_csr32(struct octeon_device *oct, uint32_t reg) { return (bus_space_read_4(oct->mem_bus_space[0].tag, oct->mem_bus_space[0].handle, reg)); } static inline void lio_write_csr32(struct octeon_device *oct, uint32_t reg, uint32_t val) { bus_space_write_4(oct->mem_bus_space[0].tag, oct->mem_bus_space[0].handle, reg, val); } static inline uint64_t lio_read_csr64(struct octeon_device *oct, uint32_t reg) { +#ifdef __i386__ + return (lio_read_csr32(oct, reg) | + ((uint64_t)lio_read_csr32(oct, reg + 4) << 32)); +#else return (bus_space_read_8(oct->mem_bus_space[0].tag, oct->mem_bus_space[0].handle, reg)); +#endif } static inline void lio_write_csr64(struct octeon_device *oct, uint32_t reg, uint64_t val) { +#ifdef __i386__ + lio_write_csr32(oct, reg, (uint32_t)val); + lio_write_csr32(oct, reg + 4, val >> 32); +#else bus_space_write_8(oct->mem_bus_space[0].tag, oct->mem_bus_space[0].handle, reg, val); +#endif } #endif /* _LIO_DEVICE_H_ */ Index: head/sys/dev/liquidio/base/lio_droq.c =================================================================== --- head/sys/dev/liquidio/base/lio_droq.c (revision 324993) +++ head/sys/dev/liquidio/base/lio_droq.c (revision 324994) @@ -1,868 +1,868 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution.
* * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_main.h" #include "cn23xx_pf_device.h" #include "lio_network.h" struct __dispatch { struct lio_stailq_node node; struct lio_recv_info *rinfo; lio_dispatch_fn_t disp_fn; }; void *lio_get_dispatch_arg(struct octeon_device *oct, uint16_t opcode, uint16_t subcode); /* * Get the argument that the user set when registering dispatch * function for a given opcode/subcode. * @param octeon_dev - the octeon device pointer. * @param opcode - the opcode for which the dispatch argument * is to be checked. * @param subcode - the subcode for which the dispatch argument * is to be checked. * @return Success: void * (argument to the dispatch function) * @return Failure: NULL * */ void * lio_get_dispatch_arg(struct octeon_device *octeon_dev, uint16_t opcode, uint16_t subcode) { struct lio_stailq_node *dispatch; void *fn_arg = NULL; int idx; uint16_t combined_opcode; combined_opcode = LIO_OPCODE_SUBCODE(opcode, subcode); idx = combined_opcode & LIO_OPCODE_MASK; mtx_lock(&octeon_dev->dispatch.lock); if (octeon_dev->dispatch.count == 0) { mtx_unlock(&octeon_dev->dispatch.lock); return (NULL); } if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { fn_arg = octeon_dev->dispatch.dlist[idx].arg; } else { STAILQ_FOREACH(dispatch, &octeon_dev->dispatch.dlist[idx].head, entries) { if (((struct lio_dispatch *)dispatch)->opcode == combined_opcode) { fn_arg = ((struct lio_dispatch *)dispatch)->arg; break; } } } mtx_unlock(&octeon_dev->dispatch.lock); return (fn_arg); } /* * Check for packets on Droq. This function should be called with lock held. * @param droq - Droq on which count is checked. * @return Returns packet count. */ uint32_t lio_droq_check_hw_for_pkts(struct lio_droq *droq) { struct octeon_device *oct = droq->oct_dev; uint32_t last_count; uint32_t pkt_count = 0; pkt_count = lio_read_csr32(oct, droq->pkts_sent_reg); last_count = pkt_count - droq->pkt_count; droq->pkt_count = pkt_count; /* we shall write to cnts at the end of processing */ if (last_count) atomic_add_int(&droq->pkts_pending, last_count); return (last_count); } static void lio_droq_compute_max_packet_bufs(struct lio_droq *droq) { uint32_t count = 0; /* * max_empty_descs is the max. no. of descs that can have no buffers. 
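 * (For example, assuming a 2048-byte buffer_size, the loop below
 * counts 64K / 2K = 32 buffers before stopping, so max_empty_descs
 * ends up as max_count - 32.)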
* If the empty desc count goes beyond this value, we cannot safely * read in a 64K packet sent by Octeon * (64K is max pkt size from Octeon) */ droq->max_empty_descs = 0; do { droq->max_empty_descs++; count += droq->buffer_size; } while (count < (64 * 1024)); droq->max_empty_descs = droq->max_count - droq->max_empty_descs; } static void lio_droq_reset_indices(struct lio_droq *droq) { droq->read_idx = 0; droq->refill_idx = 0; droq->refill_count = 0; atomic_store_rel_int(&droq->pkts_pending, 0); } static void lio_droq_destroy_ring_buffers(struct octeon_device *oct, struct lio_droq *droq) { uint32_t i; for (i = 0; i < droq->max_count; i++) { if (droq->recv_buf_list[i].buffer != NULL) { lio_recv_buffer_free(droq->recv_buf_list[i].buffer); droq->recv_buf_list[i].buffer = NULL; } } lio_droq_reset_indices(droq); } static int lio_droq_setup_ring_buffers(struct octeon_device *oct, struct lio_droq *droq) { struct lio_droq_desc *desc_ring = droq->desc_ring; void *buf; uint32_t i; for (i = 0; i < droq->max_count; i++) { buf = lio_recv_buffer_alloc(droq->buffer_size); if (buf == NULL) { lio_dev_err(oct, "%s buffer alloc failed\n", __func__); droq->stats.rx_alloc_failure++; return (-ENOMEM); } droq->recv_buf_list[i].buffer = buf; droq->recv_buf_list[i].data = ((struct mbuf *)buf)->m_data; desc_ring[i].info_ptr = 0; desc_ring[i].buffer_ptr = lio_map_ring(oct->device, droq->recv_buf_list[i].buffer, droq->buffer_size); } lio_droq_reset_indices(droq); lio_droq_compute_max_packet_bufs(droq); return (0); } int lio_delete_droq(struct octeon_device *oct, uint32_t q_no) { struct lio_droq *droq = oct->droq[q_no]; lio_dev_dbg(oct, "%s[%d]\n", __func__, q_no); while (taskqueue_cancel(droq->droq_taskqueue, &droq->droq_task, NULL)) taskqueue_drain(droq->droq_taskqueue, &droq->droq_task); taskqueue_free(droq->droq_taskqueue); droq->droq_taskqueue = NULL; lio_droq_destroy_ring_buffers(oct, droq); free(droq->recv_buf_list, M_DEVBUF); if (droq->desc_ring != NULL) lio_dma_free((droq->max_count * LIO_DROQ_DESC_SIZE), droq->desc_ring); oct->io_qmask.oq &= ~(1ULL << q_no); bzero(oct->droq[q_no], sizeof(struct lio_droq)); oct->num_oqs--; return (0); } void lio_droq_bh(void *ptr, int pending __unused) { struct lio_droq *droq = ptr; struct octeon_device *oct = droq->oct_dev; struct lio_instr_queue *iq = oct->instr_queue[droq->q_no]; int reschedule, tx_done = 1; reschedule = lio_droq_process_packets(oct, droq, oct->rx_budget); if (atomic_load_acq_int(&iq->instr_pending)) tx_done = lio_flush_iq(oct, iq, oct->tx_budget); if (reschedule || !tx_done) taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task); else lio_enable_irq(droq, iq); } int lio_init_droq(struct octeon_device *oct, uint32_t q_no, uint32_t num_descs, uint32_t desc_size, void *app_ctx) { struct lio_droq *droq; unsigned long size; uint32_t c_buf_size = 0, c_num_descs = 0, c_pkts_per_intr = 0; uint32_t c_refill_threshold = 0, desc_ring_size = 0; lio_dev_dbg(oct, "%s[%d]\n", __func__, q_no); droq = oct->droq[q_no]; bzero(droq, LIO_DROQ_SIZE); droq->oct_dev = oct; droq->q_no = q_no; if (app_ctx != NULL) droq->app_ctx = app_ctx; else droq->app_ctx = (void *)(size_t)q_no; c_num_descs = num_descs; c_buf_size = desc_size; if (LIO_CN23XX_PF(oct)) { struct lio_config *conf23 = LIO_CHIP_CONF(oct, cn23xx_pf); c_pkts_per_intr = (uint32_t)LIO_GET_OQ_PKTS_PER_INTR_CFG(conf23); c_refill_threshold = (uint32_t)LIO_GET_OQ_REFILL_THRESHOLD_CFG(conf23); } else { return (1); } droq->max_count = c_num_descs; droq->buffer_size = c_buf_size; desc_ring_size = droq->max_count * 
LIO_DROQ_DESC_SIZE; droq->desc_ring = lio_dma_alloc(desc_ring_size, &droq->desc_ring_dma); if (droq->desc_ring == NULL) { lio_dev_err(oct, "Output queue %d ring alloc failed\n", q_no); return (1); } - lio_dev_dbg(oct, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n", q_no, - droq->desc_ring, droq->desc_ring_dma); + lio_dev_dbg(oct, "droq[%d]: desc_ring: virt: 0x%p, dma: %llx\n", q_no, + droq->desc_ring, LIO_CAST64(droq->desc_ring_dma)); lio_dev_dbg(oct, "droq[%d]: num_desc: %d\n", q_no, droq->max_count); size = droq->max_count * LIO_DROQ_RECVBUF_SIZE; droq->recv_buf_list = (struct lio_recv_buffer *)malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (droq->recv_buf_list == NULL) { lio_dev_err(oct, "Output queue recv buf list alloc failed\n"); goto init_droq_fail; } if (lio_droq_setup_ring_buffers(oct, droq)) goto init_droq_fail; droq->pkts_per_intr = c_pkts_per_intr; droq->refill_threshold = c_refill_threshold; lio_dev_dbg(oct, "DROQ INIT: max_empty_descs: %d\n", droq->max_empty_descs); mtx_init(&droq->lock, "droq_lock", NULL, MTX_DEF); STAILQ_INIT(&droq->dispatch_stq_head); oct->fn_list.setup_oq_regs(oct, q_no); oct->io_qmask.oq |= BIT_ULL(q_no); /* * Initialize the taskqueue that handles * output queue packet processing. */ lio_dev_dbg(oct, "Initializing droq%d taskqueue\n", q_no); TASK_INIT(&droq->droq_task, 0, lio_droq_bh, (void *)droq); droq->droq_taskqueue = taskqueue_create_fast("lio_droq_task", M_NOWAIT, taskqueue_thread_enqueue, &droq->droq_taskqueue); taskqueue_start_threads_cpuset(&droq->droq_taskqueue, 1, PI_NET, &oct->ioq_vector[q_no].affinity_mask, "lio%d_droq%d_task", oct->octeon_id, q_no); return (0); init_droq_fail: lio_delete_droq(oct, q_no); return (1); } /* * lio_create_recv_info * Parameters: * octeon_dev - pointer to the octeon device structure * droq - droq in which the packet arrived. * buf_cnt - no. of buffers used by the packet. * idx - index in the descriptor for the first buffer in the packet. * Description: * Allocates a recv_info_t and copies the buffer addresses for packet data * into the recv_pkt space which starts at an 8B offset from recv_info_t. * Flags the descriptors for refill later. If available descriptors go * below the threshold to receive a 64K pkt, new buffers are first allocated * before the recv_pkt_t is created. * This routine will be called in interrupt context. * Returns: * Success: Pointer to recv_info_t * Failure: NULL. * Locks: * The droq->lock is held when this routine is called. */ static inline struct lio_recv_info * lio_create_recv_info(struct octeon_device *octeon_dev, struct lio_droq *droq, uint32_t buf_cnt, uint32_t idx) { struct lio_droq_info *info; struct lio_recv_pkt *recv_pkt; struct lio_recv_info *recv_info; uint32_t bytes_left, i; info = (struct lio_droq_info *)droq->recv_buf_list[idx].data; recv_info = lio_alloc_recv_info(sizeof(struct __dispatch)); if (recv_info == NULL) return (NULL); recv_pkt = recv_info->recv_pkt; recv_pkt->rh = info->rh; recv_pkt->length = (uint32_t)info->length; recv_pkt->buffer_count = (uint16_t)buf_cnt; recv_pkt->octeon_id = (uint16_t)octeon_dev->octeon_id; i = 0; bytes_left = (uint32_t)info->length; while (buf_cnt) { recv_pkt->buffer_size[i] = (bytes_left >= droq->buffer_size) ? 
droq->buffer_size : bytes_left; recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer; droq->recv_buf_list[idx].buffer = NULL; idx = lio_incr_index(idx, 1, droq->max_count); bytes_left -= droq->buffer_size; i++; buf_cnt--; } return (recv_info); } /* * If we were not able to refill all buffers, try to move around * the buffers that were not dispatched. */ static inline uint32_t lio_droq_refill_pullup_descs(struct lio_droq *droq, struct lio_droq_desc *desc_ring) { uint32_t desc_refilled = 0; uint32_t refill_index = droq->refill_idx; while (refill_index != droq->read_idx) { if (droq->recv_buf_list[refill_index].buffer != NULL) { droq->recv_buf_list[droq->refill_idx].buffer = droq->recv_buf_list[refill_index].buffer; droq->recv_buf_list[droq->refill_idx].data = droq->recv_buf_list[refill_index].data; desc_ring[droq->refill_idx].buffer_ptr = desc_ring[refill_index].buffer_ptr; droq->recv_buf_list[refill_index].buffer = NULL; desc_ring[refill_index].buffer_ptr = 0; do { droq->refill_idx = lio_incr_index(droq->refill_idx, 1, droq->max_count); desc_refilled++; droq->refill_count--; } while (droq->recv_buf_list[droq->refill_idx].buffer != NULL); } refill_index = lio_incr_index(refill_index, 1, droq->max_count); } /* while */ return (desc_refilled); } /* * lio_droq_refill * Parameters: * droq - droq in which descriptors require new buffers. * Description: * Called during normal DROQ processing in interrupt mode or by the poll * thread to refill the descriptors from which buffers were dispatched * to upper layers. Attempts to allocate new buffers. If that fails, moves * up buffers (that were not dispatched) to form a contiguous ring. * Returns: * No of descriptors refilled. * Locks: * This routine is called with droq->lock held. */ uint32_t lio_droq_refill(struct octeon_device *octeon_dev, struct lio_droq *droq) { struct lio_droq_desc *desc_ring; void *buf = NULL; uint32_t desc_refilled = 0; uint8_t *data; desc_ring = droq->desc_ring; while (droq->refill_count && (desc_refilled < droq->max_count)) { /* * If a valid buffer exists (happens if there is no dispatch), * reuse * the buffer, else allocate. */ if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) { buf = lio_recv_buffer_alloc(droq->buffer_size); /* * If a buffer could not be allocated, no point in * continuing */ if (buf == NULL) { droq->stats.rx_alloc_failure++; break; } droq->recv_buf_list[droq->refill_idx].buffer = buf; data = ((struct mbuf *)buf)->m_data; } else { data = ((struct mbuf *)droq->recv_buf_list [droq->refill_idx].buffer)->m_data; } droq->recv_buf_list[droq->refill_idx].data = data; desc_ring[droq->refill_idx].buffer_ptr = lio_map_ring(octeon_dev->device, droq->recv_buf_list[droq->refill_idx].buffer, droq->buffer_size); droq->refill_idx = lio_incr_index(droq->refill_idx, 1, droq->max_count); desc_refilled++; droq->refill_count--; } if (droq->refill_count) desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring); /* * if droq->refill_count * The refill count would not change in pass two. We only moved buffers * to close the gap in the ring, but we would still have the same no. of * buffers to refill. 
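 * (Pass two allocates nothing; it only repacks buffers that were
 * never handed up the stack so the descriptors form a contiguous run
 * again.)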
*/ return (desc_refilled); } static inline uint32_t lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len) { return ((total_len + buf_size - 1) / buf_size); } static int lio_droq_dispatch_pkt(struct octeon_device *oct, struct lio_droq *droq, union octeon_rh *rh, struct lio_droq_info *info) { struct lio_recv_info *rinfo; lio_dispatch_fn_t disp_fn; uint32_t cnt; cnt = lio_droq_get_bufcount(droq->buffer_size, (uint32_t)info->length); disp_fn = lio_get_dispatch(oct, (uint16_t)rh->r.opcode, (uint16_t)rh->r.subcode); if (disp_fn) { rinfo = lio_create_recv_info(oct, droq, cnt, droq->read_idx); if (rinfo != NULL) { struct __dispatch *rdisp = rinfo->rsvd; rdisp->rinfo = rinfo; rdisp->disp_fn = disp_fn; rinfo->recv_pkt->rh = *rh; STAILQ_INSERT_TAIL(&droq->dispatch_stq_head, &rdisp->node, entries); } else { droq->stats.dropped_nomem++; } } else { lio_dev_err(oct, "DROQ: No dispatch function (opcode %u/%u)\n", (unsigned int)rh->r.opcode, (unsigned int)rh->r.subcode); droq->stats.dropped_nodispatch++; } return (cnt); } static inline void lio_droq_drop_packets(struct octeon_device *oct, struct lio_droq *droq, uint32_t cnt) { struct lio_droq_info *info; uint32_t i = 0, buf_cnt; for (i = 0; i < cnt; i++) { info = (struct lio_droq_info *) droq->recv_buf_list[droq->read_idx].data; lio_swap_8B_data((uint64_t *)info, 2); if (info->length) { info->length += 8; droq->stats.bytes_received += info->length; buf_cnt = lio_droq_get_bufcount(droq->buffer_size, (uint32_t)info->length); } else { lio_dev_err(oct, "DROQ: In drop: pkt with len 0\n"); buf_cnt = 1; } droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt, droq->max_count); droq->refill_count += buf_cnt; } } static uint32_t lio_droq_fast_process_packets(struct octeon_device *oct, struct lio_droq *droq, uint32_t pkts_to_process) { struct lio_droq_info *info; union octeon_rh *rh; uint32_t pkt, pkt_count, total_len = 0; pkt_count = pkts_to_process; for (pkt = 0; pkt < pkt_count; pkt++) { struct mbuf *nicbuf = NULL; uint32_t pkt_len = 0; info = (struct lio_droq_info *) droq->recv_buf_list[droq->read_idx].data; lio_swap_8B_data((uint64_t *)info, 2); if (!info->length) { lio_dev_err(oct, "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n", droq->q_no, droq->read_idx, pkt_count); hexdump((uint8_t *)info, LIO_DROQ_INFO_SIZE, NULL, HD_OMIT_CHARS); pkt++; lio_incr_index(droq->read_idx, 1, droq->max_count); droq->refill_count++; break; } rh = &info->rh; info->length += 8; rh->r_dh.len += (LIO_DROQ_INFO_SIZE + 7) / 8; total_len += (uint32_t)info->length; if (lio_opcode_slow_path(rh)) { uint32_t buf_cnt; buf_cnt = lio_droq_dispatch_pkt(oct, droq, rh, info); droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt, droq->max_count); droq->refill_count += buf_cnt; } else { if (info->length <= droq->buffer_size) { pkt_len = (uint32_t)info->length; nicbuf = droq->recv_buf_list[ droq->read_idx].buffer; nicbuf->m_len = pkt_len; droq->recv_buf_list[droq->read_idx].buffer = NULL; droq->read_idx = lio_incr_index(droq->read_idx, 1, droq->max_count); droq->refill_count++; } else { bool secondary_frag = false; pkt_len = 0; while (pkt_len < info->length) { int frag_len, idx = droq->read_idx; struct mbuf *buffer; frag_len = ((pkt_len + droq->buffer_size) > info->length) ? ((uint32_t)info->length - pkt_len) : droq->buffer_size; buffer = ((struct mbuf *) droq->recv_buf_list[idx]. buffer); buffer->m_len = frag_len; if (__predict_true(secondary_frag)) { m_cat(nicbuf, buffer); } else { nicbuf = buffer; secondary_frag = true; } droq->recv_buf_list[droq->read_idx]. 
buffer = NULL; pkt_len += frag_len; droq->read_idx = lio_incr_index(droq->read_idx, 1, droq->max_count); droq->refill_count++; } } if (nicbuf != NULL) { if (droq->ops.fptr != NULL) { droq->ops.fptr(nicbuf, pkt_len, rh, droq, droq->ops.farg); } else { lio_recv_buffer_free(nicbuf); } } } if (droq->refill_count >= droq->refill_threshold) { int desc_refilled = lio_droq_refill(oct, droq); /* * Flush the droq descriptor data to memory to be sure * that when we update the credits the data in memory * is accurate. */ wmb(); lio_write_csr32(oct, droq->pkts_credit_reg, desc_refilled); /* make sure mmio write completes */ __compiler_membar(); } } /* for (each packet)... */ /* Increment refill_count by the number of buffers processed. */ droq->stats.pkts_received += pkt; droq->stats.bytes_received += total_len; tcp_lro_flush_all(&droq->lro); if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) { lio_droq_drop_packets(oct, droq, (pkts_to_process - pkt)); droq->stats.dropped_toomany += (pkts_to_process - pkt); return (pkts_to_process); } return (pkt); } int lio_droq_process_packets(struct octeon_device *oct, struct lio_droq *droq, uint32_t budget) { struct lio_stailq_node *tmp, *tmp2; uint32_t pkt_count = 0, pkts_processed = 0; /* Grab the droq lock */ mtx_lock(&droq->lock); lio_droq_check_hw_for_pkts(droq); pkt_count = atomic_load_acq_int(&droq->pkts_pending); if (!pkt_count) { mtx_unlock(&droq->lock); return (0); } if (pkt_count > budget) pkt_count = budget; pkts_processed = lio_droq_fast_process_packets(oct, droq, pkt_count); atomic_subtract_int(&droq->pkts_pending, pkts_processed); /* Release the lock */ mtx_unlock(&droq->lock); STAILQ_FOREACH_SAFE(tmp, &droq->dispatch_stq_head, entries, tmp2) { struct __dispatch *rdisp = (struct __dispatch *)tmp; STAILQ_REMOVE_HEAD(&droq->dispatch_stq_head, entries); rdisp->disp_fn(rdisp->rinfo, lio_get_dispatch_arg(oct, (uint16_t)rdisp->rinfo->recv_pkt->rh.r.opcode, (uint16_t)rdisp->rinfo->recv_pkt->rh.r.subcode)); } /* If there are packets pending, schedule tasklet again */ if (atomic_load_acq_int(&droq->pkts_pending)) return (1); return (0); } int lio_register_droq_ops(struct octeon_device *oct, uint32_t q_no, struct lio_droq_ops *ops) { struct lio_droq *droq; struct lio_config *lio_cfg = NULL; lio_cfg = lio_get_conf(oct); if (lio_cfg == NULL) return (-EINVAL); if (ops == NULL) { lio_dev_err(oct, "%s: droq_ops pointer is NULL\n", __func__); return (-EINVAL); } if (q_no >= LIO_GET_OQ_MAX_Q_CFG(lio_cfg)) { lio_dev_err(oct, "%s: droq id (%d) exceeds MAX (%d)\n", __func__, q_no, (oct->num_oqs - 1)); return (-EINVAL); } droq = oct->droq[q_no]; mtx_lock(&droq->lock); memcpy(&droq->ops, ops, sizeof(struct lio_droq_ops)); mtx_unlock(&droq->lock); return (0); } int lio_unregister_droq_ops(struct octeon_device *oct, uint32_t q_no) { struct lio_droq *droq; struct lio_config *lio_cfg = NULL; lio_cfg = lio_get_conf(oct); if (lio_cfg == NULL) return (-EINVAL); if (q_no >= LIO_GET_OQ_MAX_Q_CFG(lio_cfg)) { lio_dev_err(oct, "%s: droq id (%d) exceeds MAX (%d)\n", __func__, q_no, oct->num_oqs - 1); return (-EINVAL); } droq = oct->droq[q_no]; if (droq == NULL) { lio_dev_info(oct, "Droq id (%d) not available.\n", q_no); return (0); } mtx_lock(&droq->lock); droq->ops.fptr = NULL; droq->ops.farg = NULL; droq->ops.drop_on_max = 0; mtx_unlock(&droq->lock); return (0); } int lio_create_droq(struct octeon_device *oct, uint32_t q_no, uint32_t num_descs, uint32_t desc_size, void *app_ctx) { if (oct->droq[q_no]->oct_dev != NULL) { lio_dev_dbg(oct, "Droq already in use.
Cannot create droq %d again\n", q_no); return (1); } /* Initialize the Droq */ if (lio_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) { bzero(oct->droq[q_no], sizeof(struct lio_droq)); goto create_droq_fail; } oct->num_oqs++; lio_dev_dbg(oct, "%s: Total number of OQ: %d\n", __func__, oct->num_oqs); /* Global Droq register settings */ /* * As of now not required, as settings are done for all 32 Droqs at * the same time. */ return (0); create_droq_fail: return (-ENOMEM); } Index: head/sys/dev/liquidio/base/lio_mem_ops.c =================================================================== --- head/sys/dev/liquidio/base/lio_mem_ops.c (revision 324993) +++ head/sys/dev/liquidio/base/lio_mem_ops.c (revision 324994) @@ -1,248 +1,276 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_mem_ops.h" #define MEMOPS_IDX LIO_MAX_BAR1_MAP_INDEX #if BYTE_ORDER == BIG_ENDIAN static inline void lio_toggle_bar1_swapmode(struct octeon_device *oct, uint32_t idx) { uint32_t mask; mask = oct->fn_list.bar1_idx_read(oct, idx); mask = (mask & 0x2) ?
(mask & ~2) : (mask | 2); oct->fn_list.bar1_idx_write(oct, idx, mask); } #else /* BYTE_ORDER != BIG_ENDIAN */ #define lio_toggle_bar1_swapmode(oct, idx) #endif /* BYTE_ORDER == BIG_ENDIAN */ static inline void lio_write_bar1_mem8(struct octeon_device *oct, uint32_t reg, uint64_t val) { bus_space_write_1(oct->mem_bus_space[1].tag, oct->mem_bus_space[1].handle, reg, val); } +#ifdef __i386__ +static inline uint32_t +lio_read_bar1_mem32(struct octeon_device *oct, uint32_t reg) +{ + + return (bus_space_read_4(oct->mem_bus_space[1].tag, + oct->mem_bus_space[1].handle, reg)); +} + +static inline void +lio_write_bar1_mem32(struct octeon_device *oct, uint32_t reg, uint32_t val) +{ + + bus_space_write_4(oct->mem_bus_space[1].tag, + oct->mem_bus_space[1].handle, reg, val); +} +#endif + static inline uint64_t lio_read_bar1_mem64(struct octeon_device *oct, uint32_t reg) { +#ifdef __i386__ + return (lio_read_bar1_mem32(oct, reg) | + ((uint64_t)lio_read_bar1_mem32(oct, reg + 4) << 32)); +#else return (bus_space_read_8(oct->mem_bus_space[1].tag, oct->mem_bus_space[1].handle, reg)); +#endif } static inline void lio_write_bar1_mem64(struct octeon_device *oct, uint32_t reg, uint64_t val) { +#ifdef __i386__ + lio_write_bar1_mem32(oct, reg, (uint32_t)val); + lio_write_bar1_mem32(oct, reg + 4, val >> 32); +#else bus_space_write_8(oct->mem_bus_space[1].tag, oct->mem_bus_space[1].handle, reg, val); +#endif } static void lio_pci_fastwrite(struct octeon_device *oct, uint32_t offset, uint8_t *hostbuf, uint32_t len) { while ((len) && ((unsigned long)offset) & 7) { lio_write_bar1_mem8(oct, offset++, *(hostbuf++)); len--; } lio_toggle_bar1_swapmode(oct, MEMOPS_IDX); while (len >= 8) { lio_write_bar1_mem64(oct, offset, *((uint64_t *)hostbuf)); offset += 8; hostbuf += 8; len -= 8; } lio_toggle_bar1_swapmode(oct, MEMOPS_IDX); while (len--) lio_write_bar1_mem8(oct, offset++, *(hostbuf++)); } static inline uint64_t lio_read_bar1_mem8(struct octeon_device *oct, uint32_t reg) { return (bus_space_read_1(oct->mem_bus_space[1].tag, oct->mem_bus_space[1].handle, reg)); } static void lio_pci_fastread(struct octeon_device *oct, uint32_t offset, uint8_t *hostbuf, uint32_t len) { while ((len) && ((unsigned long)offset) & 7) { *(hostbuf++) = lio_read_bar1_mem8(oct, offset++); len--; } lio_toggle_bar1_swapmode(oct, MEMOPS_IDX); while (len >= 8) { *((uint64_t *)hostbuf) = lio_read_bar1_mem64(oct, offset); offset += 8; hostbuf += 8; len -= 8; } lio_toggle_bar1_swapmode(oct, MEMOPS_IDX); while (len--) *(hostbuf++) = lio_read_bar1_mem8(oct, offset++); } /* Core mem read/write with temporary bar1 settings. */ /* op = 1 to read, op = 0 to write. */ static void lio_pci_rw_core_mem(struct octeon_device *oct, uint64_t addr, uint8_t *hostbuf, uint32_t len, uint32_t op) { uint64_t static_mapping_base; uint32_t copy_len = 0, index_reg_val = 0; uint32_t offset; static_mapping_base = oct->console_nb_info.dram_region_base; if (static_mapping_base && static_mapping_base == (addr & 0xFFFFFFFFFFC00000ULL)) { int bar1_index = oct->console_nb_info.bar1_index; offset = (bar1_index << 22) + (addr & 0x3fffff); if (op) lio_pci_fastread(oct, offset, hostbuf, len); else lio_pci_fastwrite(oct, offset, hostbuf, len); return; } mtx_lock(&oct->mem_access_lock); /* Save the original index reg value. */ index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX); do { oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1); offset = (MEMOPS_IDX << 22) + (addr & 0x3fffff); /* * If operation crosses a 4MB boundary, split the transfer * at the 4MB boundary. 
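 *
 * A hedged worked example: a 0x100-byte access at core address
 * 0x7FFFF80 sits 0x80 bytes below the next 4MB line (0x8000000),
 * so the first pass copies 0x80 bytes, the BAR1 window is moved,
 * and the remaining 0x80 bytes go in the second pass.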
*/ if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) { copy_len = (uint32_t)(((addr & ~(0x3fffff)) + (MEMOPS_IDX << 22)) - addr); } else { copy_len = len; } if (op) { /* read from core */ lio_pci_fastread(oct, offset, hostbuf, copy_len); } else { lio_pci_fastwrite(oct, offset, hostbuf, copy_len); } len -= copy_len; addr += copy_len; hostbuf += copy_len; } while (len); oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val); mtx_unlock(&oct->mem_access_lock); } void lio_pci_read_core_mem(struct octeon_device *oct, uint64_t coreaddr, uint8_t *buf, uint32_t len) { lio_pci_rw_core_mem(oct, coreaddr, buf, len, 1); } void lio_pci_write_core_mem(struct octeon_device *oct, uint64_t coreaddr, uint8_t *buf, uint32_t len) { lio_pci_rw_core_mem(oct, coreaddr, buf, len, 0); } uint64_t lio_read_device_mem64(struct octeon_device *oct, uint64_t coreaddr) { __be64 ret; lio_pci_rw_core_mem(oct, coreaddr, (uint8_t *)&ret, 8, 1); return (be64toh(ret)); } uint32_t lio_read_device_mem32(struct octeon_device *oct, uint64_t coreaddr) { __be32 ret; lio_pci_rw_core_mem(oct, coreaddr, (uint8_t *)&ret, 4, 1); return (be32toh(ret)); } void lio_write_device_mem32(struct octeon_device *oct, uint64_t coreaddr, uint32_t val) { __be32 t = htobe32(val); lio_pci_rw_core_mem(oct, coreaddr, (uint8_t *)&t, 4, 0); } Index: head/sys/dev/liquidio/base/lio_request_manager.c =================================================================== --- head/sys/dev/liquidio/base/lio_request_manager.c (revision 324993) +++ head/sys/dev/liquidio/base/lio_request_manager.c (revision 324994) @@ -1,857 +1,858 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_main.h" #include "lio_network.h" #include "cn23xx_pf_device.h" #include "lio_rxtx.h" struct lio_iq_post_status { int status; int index; }; static void lio_check_db_timeout(void *arg, int pending); static void __lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no); /* Return 0 on success, 1 on failure */ int lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq, uint32_t num_descs) { struct lio_instr_queue *iq; struct lio_iq_config *conf = NULL; struct lio_tq *db_tq; struct lio_request_list *request_buf; bus_size_t max_size; uint32_t iq_no = (uint32_t)txpciq.s.q_no; uint32_t q_size; int error, i; if (LIO_CN23XX_PF(oct)) conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf))); if (conf == NULL) { lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id); return (1); } q_size = (uint32_t)conf->instr_type * num_descs; iq = oct->instr_queue[iq_no]; iq->oct_dev = oct; max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs; error = bus_dma_tag_create(bus_get_dma_tag(oct->device), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ max_size, /* maxsize */ LIO_MAX_SG, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &iq->txtag); if (error) { lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n", iq_no); return (1); } - iq->base_addr = lio_dma_alloc(q_size, &iq->base_addr_dma); + iq->base_addr = lio_dma_alloc(q_size, (vm_paddr_t *)&iq->base_addr_dma); if (!iq->base_addr) { lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n", iq_no); return (1); } iq->max_count = num_descs; /* * Initialize a list to hold requests that have been posted to * Octeon but have yet to be fetched by Octeon */ iq->request_list = malloc(sizeof(*iq->request_list) * num_descs, M_DEVBUF, M_NOWAIT | M_ZERO); if (iq->request_list == NULL) { lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n", iq_no); return (1); } - lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %lx count: %d\n", - iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count); + lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %llx count: %d\n", + iq_no, iq->base_addr, LIO_CAST64(iq->base_addr_dma), + iq->max_count); /* Create the descriptor buffer dma maps */ request_buf = iq->request_list; for (i = 0; i < num_descs; i++, request_buf++) { error = bus_dmamap_create(iq->txtag, 0, &request_buf->map); if (error) { lio_dev_err(oct, "Unable to create TX DMA map\n"); return (1); } } iq->txpciq.txpciq64 = txpciq.txpciq64; iq->fill_cnt = 0; iq->host_write_index = 0; iq->octeon_read_index = 0; iq->flush_index = 0; iq->last_db_time = 0; iq->db_timeout = (uint32_t)conf->db_timeout; atomic_store_rel_int(&iq->instr_pending, 0); /* Initialize the lock for this instruction queue */ mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF); mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF); mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF); mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL, MTX_DEF); oct->io_qmask.iq |= BIT_ULL(iq_no); /* Set the 32B/64B mode for each input queue */ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); iq->iqcmd_64B = (conf->instr_type == 64); oct->fn_list.setup_iq_regs(oct, iq_no); db_tq = &oct->check_db_tq[iq_no]; db_tq->tq = taskqueue_create("lio_check_db_timeout",
M_WAITOK, taskqueue_thread_enqueue, &db_tq->tq); if (db_tq->tq == NULL) { lio_dev_err(oct, "check db wq create failed for iq %d\n", iq_no); return (1); } TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout, (void *)db_tq); db_tq->ctxul = iq_no; db_tq->ctxptr = oct; taskqueue_start_threads(&db_tq->tq, 1, PI_NET, "lio%d_check_db_timeout:%d", oct->octeon_id, iq_no); taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1); /* Allocate a buf ring */ oct->instr_queue[iq_no]->br = buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK, &oct->instr_queue[iq_no]->enq_lock); if (oct->instr_queue[iq_no]->br == NULL) { lio_dev_err(oct, "Critical Failure setting up buf ring\n"); return (1); } return (0); } int lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no) { struct lio_instr_queue *iq = oct->instr_queue[iq_no]; struct lio_request_list *request_buf; struct lio_mbuf_free_info *finfo; uint64_t desc_size = 0, q_size; int i; lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no); if (oct->check_db_tq[iq_no].tq != NULL) { while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq, &oct->check_db_tq[iq_no].work, NULL)) taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq, &oct->check_db_tq[iq_no].work); taskqueue_free(oct->check_db_tq[iq_no].tq); oct->check_db_tq[iq_no].tq = NULL; } if (LIO_CN23XX_PF(oct)) desc_size = LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)); request_buf = iq->request_list; for (i = 0; i < iq->max_count; i++, request_buf++) { if ((request_buf->reqtype == LIO_REQTYPE_NORESP_NET) || (request_buf->reqtype == LIO_REQTYPE_NORESP_NET_SG)) { if (request_buf->buf != NULL) { finfo = request_buf->buf; bus_dmamap_sync(iq->txtag, request_buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(iq->txtag, request_buf->map); m_freem(finfo->mb); request_buf->buf = NULL; if (request_buf->map != NULL) { bus_dmamap_destroy(iq->txtag, request_buf->map); request_buf->map = NULL; } } else if (request_buf->map != NULL) { bus_dmamap_unload(iq->txtag, request_buf->map); bus_dmamap_destroy(iq->txtag, request_buf->map); request_buf->map = NULL; } } } if (iq->br != NULL) { buf_ring_free(iq->br, M_DEVBUF); iq->br = NULL; } if (iq->request_list != NULL) { free(iq->request_list, M_DEVBUF); iq->request_list = NULL; } if (iq->txtag != NULL) { bus_dma_tag_destroy(iq->txtag); iq->txtag = NULL; } if (iq->base_addr) { q_size = iq->max_count * desc_size; lio_dma_free((uint32_t)q_size, iq->base_addr); oct->io_qmask.iq &= ~(1ULL << iq_no); bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue)); oct->num_iqs--; return (0); } return (1); } /* Return 0 on success, 1 on failure */ int lio_setup_iq(struct octeon_device *oct, int ifidx, int q_index, union octeon_txpciq txpciq, uint32_t num_descs) { uint32_t iq_no = (uint32_t)txpciq.s.q_no; if (oct->instr_queue[iq_no]->oct_dev != NULL) { lio_dev_dbg(oct, "IQ is in use. 
Cannot create the IQ: %d again\n", iq_no); oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64; return (0); } oct->instr_queue[iq_no]->q_index = q_index; oct->instr_queue[iq_no]->ifidx = ifidx; if (lio_init_instr_queue(oct, txpciq, num_descs)) { lio_delete_instr_queue(oct, iq_no); return (1); } oct->num_iqs++; if (oct->fn_list.enable_io_queues(oct)) return (1); return (0); } int lio_wait_for_instr_fetch(struct octeon_device *oct) { int i, retry = 1000, pending, instr_cnt = 0; do { instr_cnt = 0; for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; pending = atomic_load_acq_int( &oct->instr_queue[i]->instr_pending); if (pending) __lio_check_db_timeout(oct, i); instr_cnt += pending; } if (instr_cnt == 0) break; lio_sleep_timeout(1); } while (retry-- && instr_cnt); return (instr_cnt); } static inline void lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq) { if (atomic_load_acq_int(&oct->status) == LIO_DEV_RUNNING) { lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt); /* make sure doorbell write goes through */ __compiler_membar(); iq->fill_cnt = 0; iq->last_db_time = ticks; return; } } static inline void __lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd) { uint8_t *iqptr, cmdsize; cmdsize = ((iq->iqcmd_64B) ? 64 : 32); iqptr = iq->base_addr + (cmdsize * iq->host_write_index); memcpy(iqptr, cmd, cmdsize); } static inline struct lio_iq_post_status __lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd) { struct lio_iq_post_status st; st.status = LIO_IQ_SEND_OK; /* * This ensures that the read index does not wrap around to the same * position if queue gets full before Octeon could fetch any instr. */ if (atomic_load_acq_int(&iq->instr_pending) >= (int32_t)(iq->max_count - 1)) { st.status = LIO_IQ_SEND_FAILED; st.index = -1; return (st); } if (atomic_load_acq_int(&iq->instr_pending) >= (int32_t)(iq->max_count - 2)) st.status = LIO_IQ_SEND_STOP; __lio_copy_cmd_into_iq(iq, cmd); /* "index" is returned, host_write_index is modified. */ st.index = iq->host_write_index; iq->host_write_index = lio_incr_index(iq->host_write_index, 1, iq->max_count); iq->fill_cnt++; /* * Flush the command into memory. We need to be sure the data is in * memory before indicating that the instruction is pending. */ wmb(); atomic_add_int(&iq->instr_pending, 1); return (st); } static inline void __lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf, int reqtype) { iq->request_list[idx].buf = buf; iq->request_list[idx].reqtype = reqtype; } /* Can only run in process context */ int lio_process_iq_request_list(struct octeon_device *oct, struct lio_instr_queue *iq, uint32_t budget) { struct lio_soft_command *sc; struct octeon_instr_irh *irh = NULL; struct lio_mbuf_free_info *finfo; void *buf; uint32_t inst_count = 0; uint32_t old = iq->flush_index; int reqtype; while (old != iq->octeon_read_index) { reqtype = iq->request_list[old].reqtype; buf = iq->request_list[old].buf; finfo = buf; if (reqtype == LIO_REQTYPE_NONE) goto skip_this; switch (reqtype) { case LIO_REQTYPE_NORESP_NET: lio_free_mbuf(iq, buf); break; case LIO_REQTYPE_NORESP_NET_SG: lio_free_sgmbuf(iq, buf); break; case LIO_REQTYPE_RESP_NET: case LIO_REQTYPE_SOFT_COMMAND: sc = buf; if (LIO_CN23XX_PF(oct)) irh = (struct octeon_instr_irh *) &sc->cmd.cmd3.irh; if (irh->rflag) { /* * We're expecting a response from Octeon. * It's up to lio_process_ordered_list() to * process sc. 
Add sc to the ordered soft * command response list because we expect * a response from Octeon. */ mtx_lock(&oct->response_list [LIO_ORDERED_SC_LIST].lock); atomic_add_int(&oct->response_list [LIO_ORDERED_SC_LIST]. pending_req_count, 1); STAILQ_INSERT_TAIL(&oct->response_list [LIO_ORDERED_SC_LIST]. head, &sc->node, entries); mtx_unlock(&oct->response_list [LIO_ORDERED_SC_LIST].lock); } else { if (sc->callback != NULL) { /* This callback must not sleep */ sc->callback(oct, LIO_REQUEST_DONE, sc->callback_arg); } } break; default: lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n", __func__, reqtype, buf, old); } iq->request_list[old].buf = NULL; iq->request_list[old].reqtype = 0; skip_this: inst_count++; old = lio_incr_index(old, 1, iq->max_count); if ((budget) && (inst_count >= budget)) break; } iq->flush_index = old; return (inst_count); } /* Can only be called from process context */ int lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq, uint32_t budget) { uint32_t inst_processed = 0; uint32_t tot_inst_processed = 0; int tx_done = 1; if (!mtx_trylock(&iq->iq_flush_running_lock)) return (tx_done); mtx_lock(&iq->lock); iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); do { /* Process any outstanding IQ packets. */ if (iq->flush_index == iq->octeon_read_index) break; if (budget) inst_processed = lio_process_iq_request_list(oct, iq, budget - tot_inst_processed); else inst_processed = lio_process_iq_request_list(oct, iq, 0); if (inst_processed) { atomic_subtract_int(&iq->instr_pending, inst_processed); iq->stats.instr_processed += inst_processed; } tot_inst_processed += inst_processed; inst_processed = 0; } while (tot_inst_processed < budget); if (budget && (tot_inst_processed >= budget)) tx_done = 0; iq->last_db_time = ticks; mtx_unlock(&iq->lock); mtx_unlock(&iq->iq_flush_running_lock); return (tx_done); } /* * Process instruction queue after timeout. * This routine gets called from a taskqueue or when removing the module. */ static void __lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no) { struct lio_instr_queue *iq; uint64_t next_time; if (oct == NULL) return; iq = oct->instr_queue[iq_no]; if (iq == NULL) return; if (atomic_load_acq_int(&iq->instr_pending)) { /* If ticks - last_db_time < db_timeout do nothing */ next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout); if (!lio_check_timeout(ticks, next_time)) return; iq->last_db_time = ticks; /* Flush the instruction queue */ lio_flush_iq(oct, iq, 0); lio_enable_irq(NULL, iq); } if (oct->props.ifp != NULL && iq->br != NULL) { if (mtx_trylock(&iq->enq_lock)) { if (!drbr_empty(oct->props.ifp, iq->br)) lio_mq_start_locked(oct->props.ifp, iq); mtx_unlock(&iq->enq_lock); } } } /* * Called by the Poll thread at regular intervals to check the instruction * queue for commands to be posted and for commands that were fetched by Octeon. */ static void lio_check_db_timeout(void *arg, int pending) { struct lio_tq *db_tq = (struct lio_tq *)arg; struct octeon_device *oct = db_tq->ctxptr; uint64_t iq_no = db_tq->ctxul; uint32_t delay = 10; __lio_check_db_timeout(oct, iq_no); taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, lio_ms_to_ticks(delay)); } int lio_send_command(struct octeon_device *oct, uint32_t iq_no, uint32_t force_db, void *cmd, void *buf, uint32_t datasize, uint32_t reqtype) { struct lio_iq_post_status st; struct lio_instr_queue *iq = oct->instr_queue[iq_no]; /* * Get the lock and prevent other tasks and tx interrupt handler * from running. 
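* The post_lock serializes every producer on this IQ: it covers the
* read-modify-write of host_write_index and fill_cnt inside
* __lio_post_command2() and the request_list slot claimed right
* after, so two senders can never claim the same descriptor.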
*/ mtx_lock(&iq->post_lock); st = __lio_post_command2(iq, cmd); if (st.status != LIO_IQ_SEND_FAILED) { __lio_add_to_request_list(iq, st.index, buf, reqtype); LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); if (force_db || (st.status == LIO_IQ_SEND_STOP)) lio_ring_doorbell(oct, iq); } else { LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); } mtx_unlock(&iq->post_lock); /* * This is only done here to expedite packets being flushed for * cases where there are no IQ completion interrupts. */ return (st.status); } void lio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc, uint8_t opcode, uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0, uint64_t ossp1) { struct lio_config *lio_cfg; struct octeon_instr_ih3 *ih3; struct octeon_instr_pki_ih3 *pki_ih3; struct octeon_instr_irh *irh; struct octeon_instr_rdp *rdp; KASSERT(opcode <= 15, ("%s, %d, opcode > 15", __func__, __LINE__)); KASSERT(subcode <= 127, ("%s, %d, subcode > 127", __func__, __LINE__)); lio_cfg = lio_get_conf(oct); if (LIO_CN23XX_PF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind; pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3; pki_ih3->w = 1; pki_ih3->raw = 1; pki_ih3->utag = 1; pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg; pki_ih3->utt = 1; pki_ih3->tag = LIO_CONTROL; pki_ih3->tagtype = LIO_ATOMIC_TAG; pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg; pki_ih3->pm = 0x7; pki_ih3->sl = 8; if (sc->datasize) ih3->dlengsz = sc->datasize; irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; irh->opcode = opcode; irh->subcode = subcode; /* opcode/subcode specific parameters (ossp) */ irh->ossp = irh_ossp; sc->cmd.cmd3.ossp[0] = ossp0; sc->cmd.cmd3.ossp[1] = ossp1; if (sc->rdatasize) { rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; rdp->pcie_port = oct->pcie_port; rdp->rlen = sc->rdatasize; irh->rflag = 1; /* PKI IH3 */ /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */ ih3->fsz = LIO_SOFTCMDRESP_IH3; } else { irh->rflag = 0; /* PKI IH3 */ /* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */ ih3->fsz = LIO_PCICMD_O3; } } } int lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc) { struct octeon_instr_ih3 *ih3; struct octeon_instr_irh *irh; uint32_t len = 0; if (LIO_CN23XX_PF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; if (ih3->dlengsz) { KASSERT(sc->dmadptr, ("%s, %d, sc->dmadptr is NULL", __func__, __LINE__)); sc->cmd.cmd3.dptr = sc->dmadptr; } irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; if (irh->rflag) { KASSERT(sc->dmarptr, ("%s, %d, sc->dmarptr is NULL", __func__, __LINE__)); KASSERT(sc->status_word, ("%s, %d, sc->status_word is NULL", __func__, __LINE__)); *sc->status_word = COMPLETION_WORD_INIT; sc->cmd.cmd3.rptr = sc->dmarptr; } len = (uint32_t)ih3->dlengsz; } if (sc->wait_time) sc->timeout = ticks + lio_ms_to_ticks(sc->wait_time); return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, len, LIO_REQTYPE_SOFT_COMMAND)); } int lio_setup_sc_buffer_pool(struct octeon_device *oct) { struct lio_soft_command *sc; uint64_t dma_addr; int i; STAILQ_INIT(&oct->sc_buf_pool.head); mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF); atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0); for (i = 0; i < LIO_MAX_SOFT_COMMAND_BUFFERS; i++) { sc = (struct lio_soft_command *) - lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE, &dma_addr); +
lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE, (vm_paddr_t *)&dma_addr); if (sc == NULL) { lio_free_sc_buffer_pool(oct); return (1); } sc->dma_addr = dma_addr; sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE; STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries); } return (0); } int lio_free_sc_buffer_pool(struct octeon_device *oct) { struct lio_stailq_node *tmp, *tmp2; struct lio_soft_command *sc; mtx_lock(&oct->sc_buf_pool.lock); STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) { sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head, struct lio_soft_command, node); STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries); lio_dma_free(sc->size, sc); } STAILQ_INIT(&oct->sc_buf_pool.head); mtx_unlock(&oct->sc_buf_pool.lock); return (0); } struct lio_soft_command * lio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize, uint32_t rdatasize, uint32_t ctxsize) { struct lio_soft_command *sc = NULL; struct lio_stailq_node *tmp; uint64_t dma_addr; uint32_t size; uint32_t offset = sizeof(struct lio_soft_command); KASSERT((offset + datasize + rdatasize + ctxsize) <= LIO_SOFT_COMMAND_BUFFER_SIZE, ("%s, %d, offset + datasize + rdatasize + ctxsize > LIO_SOFT_COMMAND_BUFFER_SIZE", __func__, __LINE__)); mtx_lock(&oct->sc_buf_pool.lock); if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) { mtx_unlock(&oct->sc_buf_pool.lock); return (NULL); } tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries); STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries); atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1); mtx_unlock(&oct->sc_buf_pool.lock); sc = (struct lio_soft_command *)tmp; dma_addr = sc->dma_addr; size = sc->size; bzero(sc, sc->size); sc->dma_addr = dma_addr; sc->size = size; if (ctxsize) { sc->ctxptr = (uint8_t *)sc + offset; sc->ctxsize = ctxsize; } /* Start data at 128 byte boundary */ offset = (offset + ctxsize + 127) & 0xffffff80; if (datasize) { sc->virtdptr = (uint8_t *)sc + offset; sc->dmadptr = dma_addr + offset; sc->datasize = datasize; } /* Start rdata at 128 byte boundary */ offset = (offset + datasize + 127) & 0xffffff80; if (rdatasize) { KASSERT(rdatasize >= 16, ("%s, %d, rdatasize < 16", __func__, __LINE__)); sc->virtrptr = (uint8_t *)sc + offset; sc->dmarptr = dma_addr + offset; sc->rdatasize = rdatasize; sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) + rdatasize - 8); } return (sc); } void lio_free_soft_command(struct octeon_device *oct, struct lio_soft_command *sc) { mtx_lock(&oct->sc_buf_pool.lock); STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries); atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1); mtx_unlock(&oct->sc_buf_pool.lock); } Index: head/sys/dev/liquidio/lio_main.c =================================================================== --- head/sys/dev/liquidio/lio_main.c (revision 324993) +++ head/sys/dev/liquidio/lio_main.c (revision 324994) @@ -1,2309 +1,2310 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. 
nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_ctrl.h" #include "lio_main.h" #include "lio_network.h" #include "cn23xx_pf_device.h" #include "lio_image.h" #include "lio_ioctl.h" #include "lio_rxtx.h" #include "lio_rss.h" /* Number of milliseconds to wait for DDR initialization */ #define LIO_DDR_TIMEOUT 10000 #define LIO_MAX_FW_TYPE_LEN 8 static char fw_type[LIO_MAX_FW_TYPE_LEN]; TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type)); /* * Integers that specify number of queues per PF. * Valid range is 0 to 64. * Use 0 to derive from CPU count. */ static int num_queues_per_pf0; static int num_queues_per_pf1; TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0); TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1); #ifdef RSS static int lio_rss = 1; TUNABLE_INT("hw.lio.rss", &lio_rss); #endif /* RSS */ /* Hardware LRO */ unsigned int lio_hwlro = 0; TUNABLE_INT("hw.lio.hwlro", &lio_hwlro); /* * Bitmask indicating which consoles have debug * output redirected to syslog. */ static unsigned long console_bitmask; TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask); /* * \brief determines if a given console has debug enabled. * @param console console to check * @returns 1 = enabled. 
0 otherwise */ int lio_console_debug_enabled(uint32_t console) { return (console_bitmask >> (console)) & 0x1; } static int lio_detach(device_t dev); static int lio_device_init(struct octeon_device *octeon_dev); static int lio_chip_specific_setup(struct octeon_device *oct); static void lio_watchdog(void *param); static int lio_load_firmware(struct octeon_device *oct); static int lio_nic_starter(struct octeon_device *oct); static int lio_init_nic_module(struct octeon_device *oct); static int lio_setup_nic_devices(struct octeon_device *octeon_dev); static int lio_link_info(struct lio_recv_info *recv_info, void *ptr); static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf); static int lio_set_rxcsum_command(struct ifnet *ifp, int command, uint8_t rx_cmd); static int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs); static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx); static inline void lio_update_link_status(struct ifnet *ifp, union octeon_link_status *ls); static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop); static int lio_stop_nic_module(struct octeon_device *oct); static void lio_destroy_resources(struct octeon_device *oct); static int lio_setup_rx_oom_poll_fn(struct ifnet *ifp); static void lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid); static void lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, uint16_t vid); static struct octeon_device * lio_get_other_octeon_device(struct octeon_device *oct); static int lio_wait_for_oq_pkts(struct octeon_device *oct); int lio_send_rss_param(struct lio *lio); static int lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num, char *prefix, char *suffix); /* Polling interval for determining when NIC application is alive */ #define LIO_STARTER_POLL_INTERVAL_MS 100 /* * vendor_info_array. * This array contains the list of IDs on which the driver should load. 
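* Each entry is matched against the (vendor, device, subdevice,
* revision) tuple read in lio_probe(); the 'index' field selects the
* human-readable adapter name from lio_strings[].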
*/ struct lio_vendor_info { uint16_t vendor_id; uint16_t device_id; uint16_t subdevice_id; uint8_t revision_id; uint8_t index; }; static struct lio_vendor_info lio_pci_tbl[] = { /* CN2350 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE, 0x02, 0}, /* CN2350 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1, 0x02, 0}, /* CN2360 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE, 0x02, 1}, /* CN2350 25G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE, 0x02, 2}, /* CN2360 25G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE, 0x02, 3}, {0, 0, 0, 0, 0} }; static char *lio_strings[] = { "LiquidIO 2350 10GbE Server Adapter", "LiquidIO 2360 10GbE Server Adapter", "LiquidIO 2350 25GbE Server Adapter", "LiquidIO 2360 25GbE Server Adapter", }; struct lio_if_cfg_resp { uint64_t rh; struct octeon_if_cfg_info cfg_info; uint64_t status; }; struct lio_if_cfg_context { int octeon_id; volatile int cond; }; struct lio_rx_ctl_context { int octeon_id; volatile int cond; }; static int lio_probe(device_t dev) { struct lio_vendor_info *tbl; uint16_t vendor_id; uint16_t device_id; uint16_t subdevice_id; uint8_t revision_id; char device_ver[256]; vendor_id = pci_get_vendor(dev); if (vendor_id != PCI_VENDOR_ID_CAVIUM) return (ENXIO); device_id = pci_get_device(dev); subdevice_id = pci_get_subdevice(dev); revision_id = pci_get_revid(dev); tbl = lio_pci_tbl; while (tbl->vendor_id) { if ((vendor_id == tbl->vendor_id) && (device_id == tbl->device_id) && (subdevice_id == tbl->subdevice_id) && (revision_id == tbl->revision_id)) { sprintf(device_ver, "%s, Version - %s", lio_strings[tbl->index], LIO_VERSION); device_set_desc_copy(dev, device_ver); return (BUS_PROBE_DEFAULT); } tbl++; } return (ENXIO); } static int lio_attach(device_t device) { struct octeon_device *oct_dev = NULL; uint64_t scratch1; uint32_t error; int timeout, ret = 1; uint8_t bus, dev, function; oct_dev = lio_allocate_device(device); if (oct_dev == NULL) { device_printf(device, "Error: Unable to allocate device\n"); return (-ENOMEM); } oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET; oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET; oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; oct_dev->device = device; bus = pci_get_bus(device); dev = pci_get_slot(device); function = pci_get_function(device); lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n", pci_get_vendor(device), pci_get_device(device), bus, dev, function); if (lio_device_init(oct_dev)) { lio_dev_err(oct_dev, "Failed to init device\n"); lio_detach(device); return (-ENOMEM); } scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1); if (!(scratch1 & 4ULL)) { /* * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that * the lio watchdog kernel thread is running for this * NIC. Each NIC gets one watchdog kernel thread. */ scratch1 |= 4ULL; lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1); error = kproc_create(lio_watchdog, oct_dev, &oct_dev->watchdog_task, 0, 0, "liowd/%02hhx:%02hhx.%hhx", bus, dev, function); if (!error) { kproc_resume(oct_dev->watchdog_task); } else { oct_dev->watchdog_task = NULL; lio_dev_err(oct_dev, "failed to create kernel_thread\n"); lio_detach(device); return (-1); } } oct_dev->rx_pause = 1; oct_dev->tx_pause = 1; timeout = 0; while (timeout < LIO_NIC_STARTER_TIMEOUT) { lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS); timeout += LIO_STARTER_POLL_INTERVAL_MS; /* * During the boot process interrupts are not available. 
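* (On FreeBSD the global 'cold' flag stays nonzero until interrupts
* are enabled, which is why the loop below services droq[0] by
* calling lio_droq_bh() directly in that case.)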
* So polling for first control message from FW. */ if (cold) lio_droq_bh(oct_dev->droq[0], 0); if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) { ret = lio_nic_starter(oct_dev); break; } } if (ret) { lio_dev_err(oct_dev, "Firmware failed to start\n"); lio_detach(device); return (-EIO); } lio_dev_dbg(oct_dev, "Device is ready\n"); return (0); } static int lio_detach(device_t dev) { struct octeon_device *oct_dev = device_get_softc(dev); lio_dev_dbg(oct_dev, "Stopping device\n"); if (oct_dev->watchdog_task) { uint64_t scratch1; kproc_suspend(oct_dev->watchdog_task, 0); scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1); scratch1 &= ~4ULL; lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1); } if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP)) lio_stop_nic_module(oct_dev); /* * Reset the octeon device and cleanup all memory allocated for * the octeon device by driver. */ lio_destroy_resources(oct_dev); lio_dev_info(oct_dev, "Device removed\n"); /* * This octeon device has been removed. Update the global * data structure to reflect this. Free the device structure. */ lio_free_device_mem(oct_dev); return (0); } static int lio_shutdown(device_t dev) { struct octeon_device *oct_dev = device_get_softc(dev); struct lio *lio = if_getsoftc(oct_dev->props.ifp); lio_send_rx_ctrl_cmd(lio, 0); return (0); } static int lio_suspend(device_t dev) { return (ENXIO); } static int lio_resume(device_t dev) { return (ENXIO); } static int lio_event(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: lio_init_device_list(LIO_CFG_TYPE_DEFAULT); break; default: break; } return (0); } /********************************************************************* * FreeBSD Device Interface Entry Points * *******************************************************************/ static device_method_t lio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lio_probe), DEVMETHOD(device_attach, lio_attach), DEVMETHOD(device_detach, lio_detach), DEVMETHOD(device_shutdown, lio_shutdown), DEVMETHOD(device_suspend, lio_suspend), DEVMETHOD(device_resume, lio_resume), DEVMETHOD_END }; static driver_t lio_driver = { LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device), }; devclass_t lio_devclass; DRIVER_MODULE(lio, pci, lio_driver, lio_devclass, lio_event, 0); MODULE_DEPEND(lio, pci, 1, 1, 1); MODULE_DEPEND(lio, ether, 1, 1, 1); MODULE_DEPEND(lio, firmware, 1, 1, 1); static bool fw_type_is_none(void) { return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, sizeof(LIO_FW_NAME_TYPE_NONE)) == 0; } /* * \brief Device initialization for each Octeon device that is probed * @param octeon_dev octeon device */ static int lio_device_init(struct octeon_device *octeon_dev) { unsigned long ddr_timeout = LIO_DDR_TIMEOUT; char *dbg_enb = NULL; int fw_loaded = 0; int i, j, ret; uint8_t bus, dev, function; char bootcmd[] = "\n"; bus = pci_get_bus(octeon_dev->device); dev = pci_get_slot(octeon_dev->device); function = pci_get_function(octeon_dev->device); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE); /* Enable access to the octeon device */ if (pci_enable_busmaster(octeon_dev->device)) { lio_dev_err(octeon_dev, "pci_enable_device failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE); /* Identify the Octeon type and map the BAR address space. 
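* lio_chip_specific_setup() reads the PCI device ID and, for the
* CN23XX PF, hands off to lio_cn23xx_pf_setup_device(); anything
* else is rejected as an unknown device.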
*/ if (lio_chip_specific_setup(octeon_dev)) { lio_dev_err(octeon_dev, "Chip specific setup failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE); /* * Only add a reference after setting status 'LIO_DEV_PCI_MAP_DONE', * since that is what is required for the reference to be removed * during de-initialization (see 'lio_destroy_resources'). */ lio_register_device(octeon_dev, bus, dev, function, true); octeon_dev->app_mode = LIO_DRV_INVALID_APP; if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) { fw_loaded = 0; /* Do a soft reset of the Octeon device. */ if (octeon_dev->fn_list.soft_reset(octeon_dev)) return (1); /* things might have changed */ if (!lio_cn23xx_pf_fw_loaded(octeon_dev)) fw_loaded = 0; else fw_loaded = 1; } else { fw_loaded = 1; } /* * Initialize the dispatch mechanism used to push packets arriving on * Octeon Output queues. */ if (lio_init_dispatch_list(octeon_dev)) return (1); lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CORE_DRV_ACTIVE, lio_core_drv_init, octeon_dev); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE); ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Failed to configure device registers\n"); return (ret); } /* Initialize soft command buffer pool */ if (lio_setup_sc_buffer_pool(octeon_dev)) { lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_SC_BUFF_POOL_INIT_DONE); if (lio_allocate_ioq_vector(octeon_dev)) { lio_dev_err(octeon_dev, "IOQ vector allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_MSIX_ALLOC_VECTOR_DONE); for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) { octeon_dev->instr_queue[i] = malloc(sizeof(struct lio_instr_queue), M_DEVBUF, M_NOWAIT | M_ZERO); if (octeon_dev->instr_queue[i] == NULL) return (1); } /* Setup the data structures that manage this Octeon's Input queues. */ if (lio_setup_instr_queue0(octeon_dev)) { lio_dev_err(octeon_dev, "Instruction queue initialization failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INSTR_QUEUE_INIT_DONE); /* * Initialize lists to manage the requests of different types that * arrive from user & kernel applications for this octeon device. */ if (lio_setup_response_list(octeon_dev)) { lio_dev_err(octeon_dev, "Response list allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE); for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) { octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]), M_DEVBUF, M_NOWAIT | M_ZERO); if (octeon_dev->droq[i] == NULL) return (1); } if (lio_setup_output_queue0(octeon_dev)) { lio_dev_err(octeon_dev, "Output queue initialization failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE); /* * Setup the interrupt handler and record the INT SUM register address */ if (lio_setup_interrupt(octeon_dev, octeon_dev->sriov_info.num_pf_rings)) return (1); /* Enable Octeon device interrupts */ octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE); /* * Send Credit for Octeon Output queues. Credits are always sent BEFORE * the output queue is enabled. * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. 
* Otherwise, it is possible that the DRV_ACTIVE message will be sent * before any credits have been issued, causing the ring to be reset * (and the f/w appear to never have started). */ for (j = 0; j < octeon_dev->num_oqs; j++) lio_write_csr32(octeon_dev, octeon_dev->droq[j]->pkts_credit_reg, octeon_dev->droq[j]->max_count); /* Enable the input and output queues for this Octeon device */ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Failed to enable input/output queues"); return (ret); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE); if (!fw_loaded) { lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n"); if (!ddr_timeout) { lio_dev_info(octeon_dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); } lio_sleep_timeout(LIO_RESET_MSECS); /* * Wait for the octeon to initialize DDR after the * soft-reset. */ while (!ddr_timeout) { if (pause("-", lio_ms_to_ticks(100))) { /* user probably pressed Control-C */ return (1); } } ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout); if (ret) { lio_dev_err(octeon_dev, "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", ret); return (1); } if (lio_wait_for_bootloader(octeon_dev, 1100)) { lio_dev_err(octeon_dev, "Board not responding\n"); return (1); } /* Divert uboot to take commands from host instead. */ ret = lio_console_send_cmd(octeon_dev, bootcmd, 50); lio_dev_dbg(octeon_dev, "Initializing consoles\n"); ret = lio_init_consoles(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Could not access board consoles\n"); return (1); } /* * If console debug enabled, specify empty string to * use default enablement ELSE specify NULL string for * 'disabled'. */ dbg_enb = lio_console_debug_enabled(0) ? "" : NULL; ret = lio_add_console(octeon_dev, 0, dbg_enb); if (ret) { lio_dev_err(octeon_dev, "Could not access board console\n"); return (1); } else if (lio_console_debug_enabled(0)) { /* * If console was added AND we're logging console output * then set our console print function. */ octeon_dev->console[0].print = lio_dbg_console_print; } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_CONSOLE_INIT_DONE); lio_dev_dbg(octeon_dev, "Loading firmware\n"); ret = lio_load_firmware(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Could not load firmware to board\n"); return (1); } } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK); return (0); } /* * \brief PCI FLR for each Octeon device. 
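* The routine saves config space, masks INTx and waits for pending
* transactions to drain, then writes PCIEM_CTL_INITIATE_FLR and
* restores the saved state, so only this function is reset.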
* @param oct octeon device */ static void lio_pci_flr(struct octeon_device *oct) { uint32_t exppos, status; pci_find_cap(oct->device, PCIY_EXPRESS, &exppos); pci_save_state(oct->device); /* Quiesce the device completely */ pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2); /* Wait for the Transaction Pending bit to clear */ lio_mdelay(100); status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2); if (status & PCIEM_STA_TRANSACTION_PND) { lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n"); lio_mdelay(5000); status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2); if (status & PCIEM_STA_TRANSACTION_PND) lio_dev_info(oct, "Function reset still incomplete after 5s, resetting anyway\n"); } pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2); lio_mdelay(100); pci_restore_state(oct->device); } /* * \brief Debug console print function * @param octeon_dev octeon device * @param console_num console number * @param prefix first portion of line to display * @param suffix second portion of line to display * * The OCTEON debug console outputs entire lines (excluding '\n'). * Normally, the line will be passed in the 'prefix' parameter. * However, due to buffering, it is possible for a line to be split into two * parts, in which case they will be passed as the 'prefix' parameter and * 'suffix' parameter. */ static int lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num, char *prefix, char *suffix) { if (prefix != NULL && suffix != NULL) lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix); else if (prefix != NULL) lio_dev_info(oct, "%u: %s\n", console_num, prefix); else if (suffix != NULL) lio_dev_info(oct, "%u: %s\n", console_num, suffix); return (0); } static void lio_watchdog(void *param) { int core_num; uint16_t mask_of_crashed_or_stuck_cores = 0; struct octeon_device *oct = param; bool err_msg_was_printed[12]; bzero(err_msg_was_printed, sizeof(err_msg_was_printed)); while (1) { kproc_suspend_check(oct->watchdog_task); mask_of_crashed_or_stuck_cores = (uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2); if (mask_of_crashed_or_stuck_cores) { struct octeon_device *other_oct; oct->cores_crashed = true; other_oct = lio_get_other_octeon_device(oct); if (other_oct != NULL) other_oct->cores_crashed = true; for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) { bool core_crashed_or_got_stuck; core_crashed_or_got_stuck = (mask_of_crashed_or_stuck_cores >> core_num) & 1; if (core_crashed_or_got_stuck && !err_msg_was_printed[core_num]) { lio_dev_err(oct, "ERROR: Octeon core %d crashed or got stuck! 
See oct-fwdump for details.\n", core_num); err_msg_was_printed[core_num] = true; } } } /* sleep for two seconds */ pause("-", lio_ms_to_ticks(2000)); } } static int lio_chip_specific_setup(struct octeon_device *oct) { char *s; uint32_t dev_id, rev_id; int ret = 1; dev_id = lio_read_pci_cfg(oct, 0); rev_id = pci_get_revid(oct->device); oct->subdevice_id = pci_get_subdevice(oct->device); switch (dev_id) { case LIO_CN23XX_PF_PCIID: oct->chip_id = LIO_CN23XX_PF_VID; if (pci_get_function(oct->device) == 0) { if (num_queues_per_pf0 < 0) { lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n", num_queues_per_pf0); num_queues_per_pf0 = 0; } oct->sriov_info.num_pf_rings = num_queues_per_pf0; } else { if (num_queues_per_pf1 < 0) { lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n", num_queues_per_pf1); num_queues_per_pf1 = 0; } oct->sriov_info.num_pf_rings = num_queues_per_pf1; } ret = lio_cn23xx_pf_setup_device(oct); s = "CN23XX"; break; default: s = "?"; lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id); } if (!ret) lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s, OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct), lio_get_conf(oct)->card_name, LIO_VERSION); return (ret); } static struct octeon_device * lio_get_other_octeon_device(struct octeon_device *oct) { struct octeon_device *other_oct; other_oct = lio_get_device(oct->octeon_id + 1); if ((other_oct != NULL) && other_oct->device) { int oct_busnum, other_oct_busnum; oct_busnum = pci_get_bus(oct->device); other_oct_busnum = pci_get_bus(other_oct->device); if (oct_busnum == other_oct_busnum) { int oct_slot, other_oct_slot; oct_slot = pci_get_slot(oct->device); other_oct_slot = pci_get_slot(other_oct->device); if (oct_slot == other_oct_slot) return (other_oct); } } return (NULL); } /* * \brief Load firmware to device * @param oct octeon device * * Maps device to firmware filename, requests firmware, and downloads it */ static int lio_load_firmware(struct octeon_device *oct) { const struct firmware *fw; char *tmp_fw_type = NULL; int ret = 0; char fw_name[LIO_MAX_FW_FILENAME_LEN]; if (fw_type[0] == '\0') tmp_fw_type = LIO_FW_NAME_TYPE_NIC; else tmp_fw_type = fw_type; sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME, lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX); fw = firmware_get(fw_name); if (fw == NULL) { lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n", fw_name); return (EINVAL); } ret = lio_download_firmware(oct, fw->data, fw->datasize); firmware_put(fw, FIRMWARE_UNLOAD); return (ret); } static int lio_nic_starter(struct octeon_device *oct) { int ret = 0; atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING); if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) { if (lio_init_nic_module(oct)) { lio_dev_err(oct, "NIC initialization failed\n"); ret = -1; #ifdef CAVIUM_ONLY_23XX_VF } else { if (octeon_enable_sriov(oct) < 0) ret = -1; #endif } } else { lio_dev_err(oct, "Unexpected application running on NIC (%d). 
Check firmware.\n", oct->app_mode); ret = -1; } return (ret); } static int lio_init_nic_module(struct octeon_device *oct) { int num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct)); int retval = 0; lio_dev_dbg(oct, "Initializing network interfaces\n"); /* * only default iq and oq were initialized * initialize the rest as well */ /* run port_config command for each port */ oct->ifcount = num_nic_ports; bzero(&oct->props, sizeof(struct lio_if_props)); oct->props.gmxport = -1; retval = lio_setup_nic_devices(oct); if (retval) { lio_dev_err(oct, "Setup NIC devices failed\n"); goto lio_init_failure; } lio_dev_dbg(oct, "Network interfaces ready\n"); return (retval); lio_init_failure: oct->ifcount = 0; return (retval); } static int lio_ifmedia_update(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct ifmedia *ifm; ifm = &lio->ifmedia; /* We only support Ethernet media type. */ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; case IFM_10G_CX4: case IFM_10G_SR: case IFM_10G_T: case IFM_10G_TWINAX: default: /* We don't support changing the media type. */ lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n", IFM_SUBTYPE(ifm->ifm_media)); return (EINVAL); } return (0); } static int lio_get_media_subtype(struct octeon_device *oct) { switch(oct->subdevice_id) { case LIO_CN2350_10G_SUBDEVICE: case LIO_CN2350_10G_SUBDEVICE1: case LIO_CN2360_10G_SUBDEVICE: return (IFM_10G_SR); case LIO_CN2350_25G_SUBDEVICE: case LIO_CN2360_25G_SUBDEVICE: return (IFM_25G_SR); } return (IFM_10G_SR); } -static unsigned long +static uint64_t lio_get_baudrate(struct octeon_device *oct) { switch(oct->subdevice_id) { case LIO_CN2350_10G_SUBDEVICE: case LIO_CN2350_10G_SUBDEVICE1: case LIO_CN2360_10G_SUBDEVICE: return (IF_Gbps(10)); case LIO_CN2350_25G_SUBDEVICE: case LIO_CN2360_25G_SUBDEVICE: return (IF_Gbps(25)); } return (IF_Gbps(10)); } static void lio_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct lio *lio = if_getsoftc(ifp); /* Report link down if the driver isn't running. */ if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) { ifmr->ifm_active |= IFM_NONE; return; } /* Setup the default interface info. 
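* IFM_AVALID tells the stack that ifm_status is meaningful;
* IFM_ACTIVE is added below only when firmware reports the link up.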
*/ ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (lio->linfo.link.s.link_up) { ifmr->ifm_status |= IFM_ACTIVE; } else { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev); if (lio->linfo.link.s.duplex) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } static uint64_t lio_get_counter(if_t ifp, ift_counter cnt) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; uint64_t counter = 0; int i, q_no; switch (cnt) { case IFCOUNTER_IPACKETS: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_pkts_received; } break; case IFCOUNTER_OPACKETS: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_done; } break; case IFCOUNTER_IBYTES: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_bytes_received; } break; case IFCOUNTER_OBYTES: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_tot_bytes; } break; case IFCOUNTER_IQDROPS: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_dropped; } break; case IFCOUNTER_OQDROPS: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_dropped; } break; case IFCOUNTER_IMCASTS: counter = oct->link_stats.fromwire.total_mcst; break; case IFCOUNTER_OMCASTS: counter = oct->link_stats.fromhost.mcast_pkts_sent; break; case IFCOUNTER_COLLISIONS: counter = oct->link_stats.fromhost.total_collisions; break; case IFCOUNTER_IERRORS: counter = oct->link_stats.fromwire.fcs_err + oct->link_stats.fromwire.l2_err + oct->link_stats.fromwire.frame_err; break; default: return (if_get_counter_default(ifp, cnt)); } return (counter); } static int lio_init_ifnet(struct lio *lio) { struct octeon_device *oct = lio->oct_dev; if_t ifp = lio->ifp; /* ifconfig entrypoint for media type/status reporting */ ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update, lio_ifmedia_status); /* set the default interface values */ ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)), 0, NULL); ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO)); lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media; lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media); if_initname(ifp, device_get_name(oct->device), device_get_unit(oct->device)); if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); if_setioctlfn(ifp, lio_ioctl); if_setgetcounterfn(ifp, lio_get_counter); if_settransmitfn(ifp, lio_mq_start); if_setqflushfn(ifp, lio_qflush); if_setinitfn(ifp, lio_open); if_setmtu(ifp, lio->linfo.link.s.mtu); lio->mtu = lio->linfo.link.s.mtu; if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)); if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | IFCAP_LRO | IFCAP_JUMBO_MTU | IFCAP_HWSTATS | IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0); if_setcapenable(ifp, if_getcapabilities(ifp)); if_setbaudrate(ifp, lio_get_baudrate(oct)); return (0); } static void lio_tcp_lro_free(struct octeon_device *octeon_dev, struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct lio_droq *droq; int q_no; int i; for (i = 0; i < 
octeon_dev->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; droq = octeon_dev->droq[q_no]; if (droq->lro.ifp) { tcp_lro_free(&droq->lro); droq->lro.ifp = NULL; } } } static int lio_tcp_lro_init(struct octeon_device *octeon_dev, struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct lio_droq *droq; struct lro_ctrl *lro; int i, q_no, ret = 0; for (i = 0; i < octeon_dev->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; droq = octeon_dev->droq[q_no]; lro = &droq->lro; ret = tcp_lro_init(lro); if (ret) { lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n", ret); goto lro_init_failed; } lro->ifp = ifp; } return (ret); lro_init_failed: lio_tcp_lro_free(octeon_dev, ifp); return (ret); } static int lio_setup_nic_devices(struct octeon_device *octeon_dev) { union octeon_if_cfg if_cfg; struct lio *lio = NULL; struct ifnet *ifp = NULL; struct lio_version *vdata; struct lio_soft_command *sc; struct lio_if_cfg_context *ctx; struct lio_if_cfg_resp *resp; struct lio_if_props *props; int num_iqueues, num_oqueues, retval; unsigned int base_queue; unsigned int gmx_port_id; uint32_t ctx_size, data_size; uint32_t ifidx_or_pfnum, resp_size; uint8_t mac[ETHER_HDR_LEN], i, j; /* This is to handle link status changes */ lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC, LIO_OPCODE_NIC_INFO, lio_link_info, octeon_dev); for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct lio_if_cfg_resp); ctx_size = sizeof(struct lio_if_cfg_context); data_size = sizeof(struct lio_version); sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size, ctx_size); if (sc == NULL) return (ENOMEM); resp = (struct lio_if_cfg_resp *)sc->virtrptr; ctx = (struct lio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((uint64_t *)vdata) = 0; vdata->major = htobe16(LIO_BASE_MAJOR_VERSION); vdata->minor = htobe16(LIO_BASE_MINOR_VERSION); vdata->micro = htobe16(LIO_BASE_MICRO_VERSION); num_iqueues = octeon_dev->sriov_info.num_pf_rings; num_oqueues = octeon_dev->sriov_info.num_pf_rings; base_queue = octeon_dev->sriov_info.pf_srn; gmx_port_id = octeon_dev->pf_num; ifidx_or_pfnum = octeon_dev->pf_num; lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n", ifidx_or_pfnum, num_iqueues, num_oqueues); ctx->cond = 0; ctx->octeon_id = lio_get_device_id(octeon_dev); if_cfg.if_cfg64 = 0; if_cfg.s.num_iqueues = num_iqueues; if_cfg.s.num_oqueues = num_oqueues; if_cfg.s.base_queue = base_queue; if_cfg.s.gmx_port_id = gmx_port_id; sc->iq_no = 0; lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_IF_CFG, 0, if_cfg.if_cfg64, 0); sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; sc->wait_time = 3000; retval = lio_send_soft_command(octeon_dev, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ goto setup_nic_dev_fail; } /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
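* lio_if_cfg_callback() sets ctx->cond to 1 (behind a write barrier)
* once firmware answers, and lio_sleep_cond() watches that flag.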
*/ lio_sleep_cond(octeon_dev, &ctx->cond); retval = resp->status; if (retval) { lio_dev_err(octeon_dev, "iq/oq config failed\n"); goto setup_nic_dev_fail; } lio_swap_8B_data((uint64_t *)(&resp->cfg_info), (sizeof(struct octeon_if_cfg_info)) >> 3); num_iqueues = bitcount64(resp->cfg_info.iqmask); num_oqueues = bitcount64(resp->cfg_info.oqmask); if (!(num_iqueues) || !(num_oqueues)) { lio_dev_err(octeon_dev, - "Got bad iqueues (%016lX) or oqueues (%016lX) from firmware.\n", - resp->cfg_info.iqmask, - resp->cfg_info.oqmask); + "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n", + LIO_CAST64(resp->cfg_info.iqmask), + LIO_CAST64(resp->cfg_info.oqmask)); goto setup_nic_dev_fail; } lio_dev_dbg(octeon_dev, - "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n", - i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, + "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", + i, LIO_CAST64(resp->cfg_info.iqmask), + LIO_CAST64(resp->cfg_info.oqmask), num_iqueues, num_oqueues); ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { lio_dev_err(octeon_dev, "Device allocation failed\n"); goto setup_nic_dev_fail; } lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO); if (lio == NULL) { lio_dev_err(octeon_dev, "Lio allocation failed\n"); goto setup_nic_dev_fail; } if_setsoftc(ifp, lio); ifp->if_hw_tsomax = LIO_MAX_FRAME_SIZE; ifp->if_hw_tsomaxsegcount = LIO_MAX_SG; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; lio->ifidx = ifidx_or_pfnum; props = &octeon_dev->props; props->gmxport = resp->cfg_info.linfo.gmxport; props->ifp = ifp; lio->linfo.num_rxpciq = num_oqueues; lio->linfo.num_txpciq = num_iqueues; for (j = 0; j < num_oqueues; j++) { lio->linfo.rxpciq[j].rxpciq64 = resp->cfg_info.linfo.rxpciq[j].rxpciq64; } for (j = 0; j < num_iqueues; j++) { lio->linfo.txpciq[j].txpciq64 = resp->cfg_info.linfo.txpciq[j].txpciq64; } lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; lio->linfo.link.link_status64 = resp->cfg_info.linfo.link.link_status64; /* * Point to the properties for octeon device to which this * interface belongs. 
*/ lio->oct_dev = octeon_dev; lio->ifp = ifp; lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i, lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr)); lio_init_ifnet(lio); /* 64-bit swap required on LE machines */ lio_swap_8B_data(&lio->linfo.hw_addr, 1); for (j = 0; j < 6; j++) mac[j] = *((uint8_t *)( ((uint8_t *)&lio->linfo.hw_addr) + 2 + j)); ether_ifattach(ifp, mac); /* * By default all interfaces on a single Octeon uses the same * tx and rx queues */ lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { lio_dev_err(octeon_dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq); if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { lio_dev_err(octeon_dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; } if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp)) goto setup_nic_dev_fail; if (lio_hwlro && (if_getcapenable(ifp) & IFCAP_LRO) && (if_getcapenable(ifp) & IFCAP_RXCSUM) && (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6)) lio_set_feature(ifp, LIO_CMD_LRO_ENABLE, LIO_LROIPV4 | LIO_LROIPV6); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)) lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1); else lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0); if (lio_setup_rx_oom_poll_fn(ifp)) goto setup_nic_dev_fail; lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); lio->link_changes++; lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED); /* * Sending command to firmware to enable Rx checksum offload * by default at the time of setup of Liquidio driver for * this device */ lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL, LIO_CMD_RXCSUM_ENABLE); lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL, LIO_CMD_TXCSUM_ENABLE); #ifdef RSS if (lio_rss) { if (lio_send_rss_param(lio)) goto setup_nic_dev_fail; } else #endif /* RSS */ lio_set_feature(ifp, LIO_CMD_SET_FNV, LIO_CMD_FNV_ENABLE); lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i); lio_free_soft_command(octeon_dev, sc); lio->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, lio_vlan_rx_add_vid, lio, EVENTHANDLER_PRI_FIRST); lio->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, lio_vlan_rx_kill_vid, lio, EVENTHANDLER_PRI_FIRST); /* Update stats periodically */ callout_init(&lio->stats_timer, 0); lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL; lio_add_hw_stats(lio); } return (0); setup_nic_dev_fail: lio_free_soft_command(octeon_dev, sc); while (i--) { lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i); lio_destroy_nic_device(octeon_dev, i); } return (ENODEV); } static int lio_link_info(struct lio_recv_info *recv_info, void *ptr) { struct octeon_device *oct = (struct octeon_device *)ptr; struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt; union octeon_link_status *ls; int gmxport = 0, i; lio_dev_dbg(oct, "%s Called\n", __func__); if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) { lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n", recv_pkt->buffer_size[0], recv_pkt->rh.r_nic_info.gmxport); goto nic_info_err; } gmxport = recv_pkt->rh.r_nic_info.gmxport; ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data + LIO_DROQ_INFO_SIZE); lio_swap_8B_data((uint64_t *)ls, (sizeof(union octeon_link_status)) >> 3); if (oct->props.gmxport == 
gmxport) lio_update_link_status(oct->props.ifp, ls); nic_info_err: for (i = 0; i < recv_pkt->buffer_count; i++) lio_recv_buffer_free(recv_pkt->buffer_ptr[i]); lio_free_recv_info(recv_info); return (0); } void lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo) { bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(iq->txtag, finfo->map); m_freem(finfo->mb); } void lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo) { struct lio_gather *g; struct octeon_device *oct; struct lio *lio; int iq_no; g = finfo->g; iq_no = iq->txpciq.s.q_no; oct = iq->oct_dev; lio = if_getsoftc(oct->props.ifp); mtx_lock(&lio->glist_lock[iq_no]); STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries); mtx_unlock(&lio->glist_lock[iq_no]); bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(iq->txtag, finfo->map); m_freem(finfo->mb); } static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf) { struct lio_soft_command *sc = (struct lio_soft_command *)buf; struct lio_if_cfg_resp *resp; struct lio_if_cfg_context *ctx; resp = (struct lio_if_cfg_resp *)sc->virtrptr; ctx = (struct lio_if_cfg_context *)sc->ctxptr; oct = lio_get_device(ctx->octeon_id); if (resp->status) lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n", LIO_CAST64(resp->status), status); ctx->cond = 1; snprintf(oct->fw_info.lio_firmware_version, 32, "%s", resp->cfg_info.lio_firmware_version); /* * This barrier is required to be sure that the response has been * written fully before waking up the handler */ wmb(); } static int lio_is_mac_changed(uint8_t *new, uint8_t *old) { return ((new[0] != old[0]) || (new[1] != old[1]) || (new[2] != old[2]) || (new[3] != old[3]) || (new[4] != old[4]) || (new[5] != old[5])); } void lio_open(void *arg) { struct lio *lio = arg; struct ifnet *ifp = lio->ifp; struct octeon_device *oct = lio->oct_dev; uint8_t *mac_new, mac_old[ETHER_HDR_LEN]; int ret = 0; lio_ifstate_set(lio, LIO_IFSTATE_RUNNING); /* Ready for link status updates */ lio->intf_open = 1; lio_dev_info(oct, "Interface Open, ready for traffic\n"); /* tell Octeon to start forwarding packets to host */ lio_send_rx_ctrl_cmd(lio, 1); mac_new = IF_LLADDR(ifp); memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN); if (lio_is_mac_changed(mac_new, mac_old)) { ret = lio_set_mac(ifp, mac_new); if (ret) lio_dev_err(oct, "MAC change failed, error: %d\n", ret); } /* Now inform the stack we're ready */ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); lio_dev_info(oct, "Interface is opened\n"); } static int lio_set_rxcsum_command(struct ifnet *ifp, int command, uint8_t rx_cmd) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", ret); } return (ret); } static int lio_stop_nic_module(struct octeon_device *oct) { int i, j; struct lio *lio; lio_dev_dbg(oct, "Stopping network interfaces\n"); if (!oct->ifcount) { lio_dev_err(oct, "Init for Octeon was not completed\n"); return (1); } mtx_lock(&oct->cmd_resp_wqlock); oct->cmd_resp_state = LIO_DRV_OFFLINE; mtx_unlock(&oct->cmd_resp_wqlock); for (i = 0; i < oct->ifcount; i++) 
{ lio = if_getsoftc(oct->props.ifp); for (j = 0; j < oct->num_oqs; j++) lio_unregister_droq_ops(oct, lio->linfo.rxpciq[j].s.q_no); } callout_drain(&lio->stats_timer); for (i = 0; i < oct->ifcount; i++) lio_destroy_nic_device(oct, i); lio_dev_dbg(oct, "Network interface stopped\n"); return (0); } static void lio_delete_glists(struct octeon_device *oct, struct lio *lio) { struct lio_gather *g; int i; if (lio->glist_lock != NULL) { free((void *)lio->glist_lock, M_DEVBUF); lio->glist_lock = NULL; } if (lio->ghead == NULL) return; for (i = 0; i < lio->linfo.num_txpciq; i++) { do { g = (struct lio_gather *) lio_delete_first_node(&lio->ghead[i]); free(g, M_DEVBUF); } while (g); if ((lio->glists_virt_base != NULL) && (lio->glists_virt_base[i] != NULL)) { lio_dma_free(lio->glist_entry_size * lio->tx_qsize, lio->glists_virt_base[i]); } } free(lio->glists_virt_base, M_DEVBUF); lio->glists_virt_base = NULL; free(lio->glists_dma_base, M_DEVBUF); lio->glists_dma_base = NULL; free(lio->ghead, M_DEVBUF); lio->ghead = NULL; } static int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) { struct lio_gather *g; int i, j; lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF, M_NOWAIT | M_ZERO); if (lio->glist_lock == NULL) return (1); lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF, M_NOWAIT | M_ZERO); if (lio->ghead == NULL) { free((void *)lio->glist_lock, M_DEVBUF); lio->glist_lock = NULL; return (1); } lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE); /* * allocate memory to store virtual and dma base address of * per glist consistent memory */ lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF, M_NOWAIT | M_ZERO); lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF, M_NOWAIT | M_ZERO); if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) { lio_delete_glists(oct, lio); return (1); } for (i = 0; i < num_iqs; i++) { mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF); STAILQ_INIT(&lio->ghead[i]); lio->glists_virt_base[i] = lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize, (vm_paddr_t *)&lio->glists_dma_base[i]); if (lio->glists_virt_base[i] == NULL) { lio_delete_glists(oct, lio); return (1); } for (j = 0; j < lio->tx_qsize; j++) { g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO); if (g == NULL) break; g->sg = (struct lio_sg_entry *) ((uint64_t)lio->glists_virt_base[i] + (j * lio->glist_entry_size)); g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] + (j * lio->glist_entry_size); STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries); } if (j != lio->tx_qsize) { lio_delete_glists(oct, lio); return (1); } } return (0); } void lio_stop(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING); if_link_state_change(ifp, LINK_STATE_DOWN); lio->intf_open = 0; lio->linfo.link.s.link_up = 0; lio->link_changes++; lio_send_rx_ctrl_cmd(lio, 0); /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); lio_dev_info(oct, "Interface is stopped\n"); } static void lio_check_rx_oom_status(struct lio *lio) { struct lio_droq *droq; struct octeon_device *oct = lio->oct_dev; int desc_refilled; int q, q_no = 0; for (q = 0; q < oct->num_oqs; q++) { q_no = lio->linfo.rxpciq[q].s.q_no; droq = oct->droq[q_no]; if (droq == NULL) continue; if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) { mtx_lock(&droq->lock); desc_refilled = lio_droq_refill(oct, droq); 
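/* * desc_refilled counts the buffers just replenished; exactly that * count is credited back through pkts_credit_reg below so the * hardware can begin using the new descriptors. */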
/* * Flush the droq descriptor data to memory to be sure * that when we update the credits the data in memory * is accurate. */ wmb(); lio_write_csr32(oct, droq->pkts_credit_reg, desc_refilled); /* make sure mmio write completes */ __compiler_membar(); mtx_unlock(&droq->lock); } } } static void lio_poll_check_rx_oom_status(void *arg, int pending __unused) { struct lio_tq *rx_status_tq = arg; struct lio *lio = rx_status_tq->ctxptr; if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) lio_check_rx_oom_status(lio); taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work, lio_ms_to_ticks(50)); } static int lio_setup_rx_oom_poll_fn(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_tq *rx_status_tq; rx_status_tq = &lio->rx_status_tq; rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK, taskqueue_thread_enqueue, &rx_status_tq->tq); if (rx_status_tq->tq == NULL) { lio_dev_err(oct, "unable to create lio rx oom status tq\n"); return (-1); } TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0, lio_poll_check_rx_oom_status, (void *)rx_status_tq); rx_status_tq->ctxptr = lio; taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET, "lio%d_rx_oom_status", oct->octeon_id); taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work, lio_ms_to_ticks(50)); return (0); } static void lio_cleanup_rx_oom_poll_fn(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); if (lio->rx_status_tq.tq != NULL) { while (taskqueue_cancel_timeout(lio->rx_status_tq.tq, &lio->rx_status_tq.work, NULL)) taskqueue_drain_timeout(lio->rx_status_tq.tq, &lio->rx_status_tq.work); taskqueue_free(lio->rx_status_tq.tq); lio->rx_status_tq.tq = NULL; } } static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct ifnet *ifp = oct->props.ifp; struct lio *lio; if (ifp == NULL) { lio_dev_err(oct, "%s No ifp ptr for index %d\n", __func__, ifidx); return; } lio = if_getsoftc(ifp); lio_ifstate_set(lio, LIO_IFSTATE_DETACH); lio_dev_dbg(oct, "NIC device cleanup\n"); if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING) lio_stop(ifp); if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); if (lio_wait_for_oq_pkts(oct)) lio_dev_err(oct, "OQ had pending packets\n"); if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED) ether_ifdetach(ifp); lio_tcp_lro_free(oct, ifp); lio_cleanup_rx_oom_poll_fn(ifp); lio_delete_glists(oct, lio); EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach); EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach); free(lio, M_DEVBUF); if_free(ifp); oct->props.gmxport = -1; oct->props.ifp = NULL; } static void print_link_info(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) && lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { struct octeon_link_info *linfo = &lio->linfo; if (linfo->link.s.link_up) { lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n", linfo->link.s.speed, (linfo->link.s.duplex) ? 
"Full" : "Half"); } else { lio_dev_info(lio->oct_dev, "Link Down\n"); } } } static inline void lio_update_link_status(struct ifnet *ifp, union octeon_link_status *ls) { struct lio *lio = if_getsoftc(ifp); int changed = (lio->linfo.link.link_status64 != ls->link_status64); lio->linfo.link.link_status64 = ls->link_status64; if ((lio->intf_open) && (changed)) { print_link_info(ifp); lio->link_changes++; if (lio->linfo.link.s.link_up) if_link_state_change(ifp, LINK_STATE_UP); else if_link_state_change(ifp, LINK_STATE_DOWN); } } /* * \brief Callback for rx ctrl * @param status status of request * @param buf pointer to resp structure */ static void lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf) { struct lio_soft_command *sc = (struct lio_soft_command *)buf; struct lio_rx_ctl_context *ctx; ctx = (struct lio_rx_ctl_context *)sc->ctxptr; oct = lio_get_device(ctx->octeon_id); if (status) lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n", LIO_CAST64(status)); ctx->cond = 1; /* * This barrier is required to be sure that the response has been * written fully before waking up the handler */ wmb(); } static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct lio_soft_command *sc; struct lio_rx_ctl_context *ctx; union octeon_cmd *ncmd; struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; int ctx_size = sizeof(struct lio_rx_ctl_context); int retval; if (oct->props.rx_on == start_stop) return; sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size); if (sc == NULL) return; ncmd = (union octeon_cmd *)sc->virtdptr; ctx = (struct lio_rx_ctl_context *)sc->ctxptr; ctx->cond = 0; ctx->octeon_id = lio_get_device_id(oct); ncmd->cmd64 = 0; ncmd->s.cmd = LIO_CMD_RX_CTL; ncmd->s.param1 = start_stop; lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0, 0, 0); sc->callback = lio_rx_ctl_callback; sc->callback_arg = sc; sc->wait_time = 5000; retval = lio_send_soft_command(oct, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_dev_err(oct, "Failed to send RX Control message\n"); } else { /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
*/ lio_sleep_cond(oct, &ctx->cond); oct->props.rx_on = start_stop; } lio_free_soft_command(oct, sc); } static void lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; if (if_getsoftc(ifp) != arg) /* Not our event */ return; if ((vid == 0) || (vid > 4095)) /* Invalid */ return; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); } } static void lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, uint16_t vid) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; if (if_getsoftc(ifp) != arg) /* Not our event */ return; if ((vid == 0) || (vid > 4095)) /* Invalid */ return; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Kill VLAN filter failed in core (ret: 0x%x)\n", ret); } } static int lio_wait_for_oq_pkts(struct octeon_device *oct) { int i, pending_pkts, pkt_cnt = 0, retry = 100; do { pending_pkts = 0; for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]); if (pkt_cnt > 0) { pending_pkts += pkt_cnt; taskqueue_enqueue(oct->droq[i]->droq_taskqueue, &oct->droq[i]->droq_task); } } pkt_cnt = 0; lio_sleep_timeout(1); } while (retry-- && pending_pkts); return (pkt_cnt); } static void lio_destroy_resources(struct octeon_device *oct) { int i, refcount; switch (atomic_load_acq_int(&oct->status)) { case LIO_DEV_RUNNING: case LIO_DEV_CORE_OK: /* No more instructions will be forwarded. */ atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET); oct->app_mode = LIO_DRV_INVALID_APP; lio_dev_dbg(oct, "Device state is now %s\n", lio_get_state_string(&oct->status)); lio_sleep_timeout(100); /* fallthrough */ case LIO_DEV_HOST_OK: /* fallthrough */ case LIO_DEV_CONSOLE_INIT_DONE: /* Remove any consoles */ lio_remove_consoles(oct); /* fallthrough */ case LIO_DEV_IO_QUEUES_DONE: if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); /* * Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. 
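* Every case in this switch intentionally falls through to the
* next, so teardown proceeds from the device's current state all
* the way down to LIO_DEV_BEGIN_STATE, releasing resources in the
* reverse order of initialization.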
*/ oct->fn_list.disable_io_queues(oct); if (lio_wait_for_oq_pkts(oct)) lio_dev_err(oct, "OQ had pending packets\n"); /* fallthrough */ case LIO_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); if (oct->msix_on) { for (i = 0; i < oct->num_msix_irqs - 1; i++) { if (oct->ioq_vector[i].tag != NULL) { bus_teardown_intr(oct->device, oct->ioq_vector[i].msix_res, oct->ioq_vector[i].tag); oct->ioq_vector[i].tag = NULL; } if (oct->ioq_vector[i].msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->ioq_vector[i].vector, oct->ioq_vector[i].msix_res); oct->ioq_vector[i].msix_res = NULL; } } /* non-iov vector's argument is oct struct */ if (oct->tag != NULL) { bus_teardown_intr(oct->device, oct->msix_res, oct->tag); oct->tag = NULL; } if (oct->msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->aux_vector, oct->msix_res); oct->msix_res = NULL; } pci_release_msi(oct->device); } /* fallthrough */ case LIO_DEV_IN_RESET: case LIO_DEV_DROQ_INIT_DONE: /* Wait for any pending operations */ lio_mdelay(100); for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; lio_delete_droq(oct, i); } /* fallthrough */ case LIO_DEV_RESP_LIST_INIT_DONE: for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) { if (oct->droq[i] != NULL) { free(oct->droq[i], M_DEVBUF); oct->droq[i] = NULL; } } lio_delete_response_list(oct); /* fallthrough */ case LIO_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; lio_delete_instr_queue(oct, i); } /* fallthrough */ case LIO_DEV_MSIX_ALLOC_VECTOR_DONE: for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) { if (oct->instr_queue[i] != NULL) { free(oct->instr_queue[i], M_DEVBUF); oct->instr_queue[i] = NULL; } } lio_free_ioq_vector(oct); /* fallthrough */ case LIO_DEV_SC_BUFF_POOL_INIT_DONE: lio_free_sc_buffer_pool(oct); /* fallthrough */ case LIO_DEV_DISPATCH_INIT_DONE: lio_delete_dispatch_list(oct); /* fallthrough */ case LIO_DEV_PCI_MAP_DONE: refcount = lio_deregister_device(oct); if (fw_type_is_none()) lio_pci_flr(oct); if (!refcount) oct->fn_list.soft_reset(oct); lio_unmap_pci_barx(oct, 0); lio_unmap_pci_barx(oct, 1); /* fallthrough */ case LIO_DEV_PCI_ENABLE_DONE: /* Disable the device, releasing the PCI INT */ pci_disable_busmaster(oct->device); /* fallthrough */ case LIO_DEV_BEGIN_STATE: break; } /* end switch (oct->status) */ } Index: head/sys/dev/liquidio/lio_main.h =================================================================== --- head/sys/dev/liquidio/lio_main.h (revision 324993) +++ head/sys/dev/liquidio/lio_main.h (revision 324994) @@ -1,145 +1,141 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /* * \file lio_main.h * \brief Host Driver: This file is included by all host driver source files * to include common definitions. */ #ifndef _LIO_MAIN_H_ #define _LIO_MAIN_H_ extern unsigned int lio_hwlro; -#ifdef __LP64__ #define LIO_CAST64(v) ((long long)(long)(v)) -#else -#error "Unknown system architecture" -#endif #define LIO_DRV_NAME "lio" /** Swap 8B blocks */ static inline void lio_swap_8B_data(uint64_t *data, uint32_t blocks) { while (blocks) { *data = htobe64(*data); blocks--; data++; } } /* * \brief unmaps a PCI BAR * @param oct Pointer to Octeon device * @param baridx bar index */ static inline void lio_unmap_pci_barx(struct octeon_device *oct, int baridx) { lio_dev_dbg(oct, "Freeing PCI mapped regions for Bar%d\n", baridx); if (oct->mem_bus_space[baridx].pci_mem != NULL) { bus_release_resource(oct->device, SYS_RES_MEMORY, PCIR_BAR(baridx * 2), oct->mem_bus_space[baridx].pci_mem); oct->mem_bus_space[baridx].pci_mem = NULL; } } /* * \brief maps a PCI BAR * @param oct Pointer to Octeon device * @param baridx bar index */ static inline int lio_map_pci_barx(struct octeon_device *oct, int baridx) { int rid = PCIR_BAR(baridx * 2); oct->mem_bus_space[baridx].pci_mem = bus_alloc_resource_any(oct->device, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (oct->mem_bus_space[baridx].pci_mem == NULL) { lio_dev_err(oct, "Unable to allocate bus resource: memory\n"); return (ENXIO); } /* Save bus_space values for READ/WRITE_REG macros */ oct->mem_bus_space[baridx].tag = rman_get_bustag(oct->mem_bus_space[baridx].pci_mem); oct->mem_bus_space[baridx].handle = rman_get_bushandle(oct->mem_bus_space[baridx].pci_mem); - lio_dev_dbg(oct, "BAR%d Tag 0x%lx Handle 0x%lx\n", - baridx, oct->mem_bus_space[baridx].tag, - oct->mem_bus_space[baridx].handle); + lio_dev_dbg(oct, "BAR%d Tag 0x%llx Handle 0x%llx\n", + baridx, LIO_CAST64(oct->mem_bus_space[baridx].tag), + LIO_CAST64(oct->mem_bus_space[baridx].handle)); return (0); } static inline void lio_sleep_cond(struct octeon_device *oct, volatile int *condition) { while (!(*condition)) { lio_mdelay(1); lio_flush_iq(oct, oct->instr_queue[0], 0); lio_process_ordered_list(oct, 0); } } int lio_console_debug_enabled(uint32_t console); #ifndef ROUNDUP4 #define ROUNDUP4(val) (((val) + 3) & 0xfffffffc) #endif #ifndef ROUNDUP8 #define ROUNDUP8(val) (((val) + 7) & 0xfffffff8) #endif #define BIT_ULL(nr) (1ULL << (nr)) void lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo); void lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo); #endif /* _LIO_MAIN_H_ */ Index: head/sys/dev/liquidio/lio_rxtx.c =================================================================== --- head/sys/dev/liquidio/lio_rxtx.c (revision 324993) +++ 
head/sys/dev/liquidio/lio_rxtx.c (revision 324994) @@ -1,325 +1,325 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_ctrl.h" #include "lio_main.h" #include "lio_network.h" #include "lio_rxtx.h" int lio_xmit(struct lio *lio, struct lio_instr_queue *iq, struct mbuf **m_headp) { struct lio_data_pkt ndata; union lio_cmd_setup cmdsetup; struct lio_mbuf_free_info *finfo = NULL; struct octeon_device *oct = iq->oct_dev; struct lio_iq_stats *stats; struct octeon_instr_irh *irh; struct lio_request_list *tx_buf; union lio_tx_info *tx_info; struct mbuf *m_head; bus_dma_segment_t segs[LIO_MAX_SG]; bus_dmamap_t map; uint64_t dptr = 0; uint32_t tag = 0; int iq_no = 0; int nsegs; int status = 0; iq_no = iq->txpciq.s.q_no; tag = iq_no; stats = &oct->instr_queue[iq_no]->stats; tx_buf = iq->request_list + iq->host_write_index; /* * Check for all conditions in which the current packet cannot be * transmitted. */ if (!(atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING) || (!lio->linfo.link.s.link_up)) { lio_dev_info(oct, "Transmit failed link_status : %d\n", lio->linfo.link.s.link_up); status = ENETDOWN; goto drop_packet; } if (lio_iq_is_full(oct, iq_no)) { /* Defer sending if queue is full */ lio_dev_dbg(oct, "Transmit failed iq:%d full\n", iq_no); stats->tx_iq_busy++; return (ENOBUFS); } map = tx_buf->map; status = bus_dmamap_load_mbuf_sg(iq->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (status == EFBIG) { struct mbuf *m; m = m_defrag(*m_headp, M_NOWAIT); if (m == NULL) { stats->mbuf_defrag_failed++; goto drop_packet; } *m_headp = m; status = bus_dmamap_load_mbuf_sg(iq->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); } if (status == ENOMEM) { goto retry; } else if (status) { stats->tx_dmamap_fail++; lio_dev_dbg(oct, "bus_dmamap_load_mbuf_sg failed with error %d. 
iq:%d", status, iq_no); goto drop_packet; } m_head = *m_headp; /* Info used to unmap and free the buffers. */ finfo = &tx_buf->finfo; finfo->map = map; finfo->mb = m_head; /* Prepare the attributes for the data to be passed to OSI. */ bzero(&ndata, sizeof(struct lio_data_pkt)); ndata.buf = (void *)finfo; ndata.q_no = iq_no; ndata.datasize = m_head->m_pkthdr.len; cmdsetup.cmd_setup64 = 0; cmdsetup.s.iq_no = iq_no; if (m_head->m_pkthdr.csum_flags & CSUM_IP) cmdsetup.s.ip_csum = 1; if ((m_head->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) || (m_head->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))) cmdsetup.s.transport_csum = 1; if (nsegs == 1) { cmdsetup.s.u.datasize = segs[0].ds_len; lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); dptr = segs[0].ds_addr; ndata.cmd.cmd3.dptr = dptr; ndata.reqtype = LIO_REQTYPE_NORESP_NET; } else { struct lio_gather *g; int i; mtx_lock(&lio->glist_lock[iq_no]); g = (struct lio_gather *) lio_delete_first_node(&lio->ghead[iq_no]); mtx_unlock(&lio->glist_lock[iq_no]); if (g == NULL) { lio_dev_err(oct, "Transmit scatter gather: glist null!\n"); goto retry; } cmdsetup.s.gather = 1; cmdsetup.s.u.gatherptrs = nsegs; lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); bzero(g->sg, g->sg_size); i = 0; while (nsegs--) { g->sg[(i >> 2)].ptr[(i & 3)] = segs[i].ds_addr; lio_add_sg_size(&g->sg[(i >> 2)], segs[i].ds_len, (i & 3)); i++; } dptr = g->sg_dma_ptr; ndata.cmd.cmd3.dptr = dptr; finfo->g = g; ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG; } irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; tx_info = (union lio_tx_info *)&ndata.cmd.cmd3.ossp[0]; if (m_head->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) { tx_info->s.gso_size = m_head->m_pkthdr.tso_segsz; tx_info->s.gso_segs = howmany(m_head->m_pkthdr.len, m_head->m_pkthdr.tso_segsz); stats->tx_gso++; } /* HW insert VLAN tag */ if (m_head->m_flags & M_VLANTAG) { irh->priority = m_head->m_pkthdr.ether_vtag >> 13; irh->vlan = m_head->m_pkthdr.ether_vtag & 0xfff; } status = lio_send_data_pkt(oct, &ndata); if (status == LIO_IQ_SEND_FAILED) goto retry; if (tx_info->s.gso_segs) stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; stats->tx_tot_bytes += ndata.datasize; return (0); retry: return (ENOBUFS); drop_packet: stats->tx_dropped++; - lio_dev_err(oct, "IQ%d Transmit dropped: %lu\n", iq_no, - stats->tx_dropped); + lio_dev_err(oct, "IQ%d Transmit dropped: %llu\n", iq_no, + LIO_CAST64(stats->tx_dropped)); m_freem(*m_headp); *m_headp = NULL; return (status); } int lio_mq_start_locked(struct ifnet *ifp, struct lio_instr_queue *iq) { struct lio *lio = if_getsoftc(ifp); struct mbuf *next; int err = 0; if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) || (!lio->linfo.link.s.link_up)) return (-ENETDOWN); /* Process the queue */ while ((next = drbr_peek(ifp, iq->br)) != NULL) { err = lio_xmit(lio, iq, &next); if (err) { if (next == NULL) drbr_advance(ifp, iq->br); else drbr_putback(ifp, iq->br, next); break; } drbr_advance(ifp, iq->br); /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) || (!lio->linfo.link.s.link_up)) break; } return (err); } int lio_mq_start(struct ifnet *ifp, struct mbuf *m) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_instr_queue *iq; int err = 0, i; #ifdef RSS uint32_t bucket_id; #endif if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { #ifdef RSS if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), &bucket_id) == 0) { i = bucket_id % oct->num_iqs; if 
(bucket_id > oct->num_iqs) lio_dev_dbg(oct, "bucket_id (%d) > num_iqs (%d)\n", bucket_id, oct->num_iqs); } else #endif i = m->m_pkthdr.flowid % oct->num_iqs; } else i = curcpu % oct->num_iqs; iq = oct->instr_queue[i]; err = drbr_enqueue(ifp, iq->br, m); if (err) return (err); if (mtx_trylock(&iq->enq_lock)) { lio_mq_start_locked(ifp, iq); mtx_unlock(&iq->enq_lock); } return (err); } void lio_qflush(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_instr_queue *iq; struct mbuf *m; int i; for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; iq = oct->instr_queue[i]; mtx_lock(&iq->enq_lock); while ((m = buf_ring_dequeue_sc(iq->br)) != NULL) m_freem(m); mtx_unlock(&iq->enq_lock); } if_qflush(ifp); } Index: head/sys/dev/liquidio/lio_sysctl.c =================================================================== --- head/sys/dev/liquidio/lio_sysctl.c (revision 324993) +++ head/sys/dev/liquidio/lio_sysctl.c (revision 324994) @@ -1,1974 +1,1976 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_network.h" #include "lio_ctrl.h" #include "cn23xx_pf_device.h" #include "lio_image.h" #include "lio_main.h" #include "lio_rxtx.h" #include "lio_ioctl.h" #define LIO_OFF_PAUSE 0 #define LIO_RX_PAUSE 1 #define LIO_TX_PAUSE 2 #define LIO_REGDUMP_LEN 4096 #define LIO_REGDUMP_LEN_23XX 49248 #define LIO_REGDUMP_LEN_XXXX LIO_REGDUMP_LEN_23XX #define LIO_USE_ADAPTIVE_RX_COALESCE 1 #define LIO_USE_ADAPTIVE_TX_COALESCE 2 #define LIO_RX_COALESCE_USECS 3 #define LIO_RX_MAX_COALESCED_FRAMES 4 #define LIO_TX_MAX_COALESCED_FRAMES 8 #define LIO_PKT_RATE_LOW 12 #define LIO_RX_COALESCE_USECS_LOW 13 #define LIO_RX_MAX_COALESCED_FRAMES_LOW 14 #define LIO_TX_MAX_COALESCED_FRAMES_LOW 16 #define LIO_PKT_RATE_HIGH 17 #define LIO_RX_COALESCE_USECS_HIGH 18 #define LIO_RX_MAX_COALESCED_FRAMES_HIGH 19 #define LIO_TX_MAX_COALESCED_FRAMES_HIGH 21 #define LIO_RATE_SAMPLE_INTERVAL 22 #define LIO_SET_RING_RX 1 #define LIO_SET_RING_TX 2 static int lio_get_eeprom(SYSCTL_HANDLER_ARGS); static int lio_get_set_pauseparam(SYSCTL_HANDLER_ARGS); static int lio_get_regs(SYSCTL_HANDLER_ARGS); static int lio_cn23xx_pf_read_csr_reg(char *s, struct octeon_device *oct); static int lio_get_set_fwmsglevel(SYSCTL_HANDLER_ARGS); static int lio_set_stats_interval(SYSCTL_HANDLER_ARGS); static void lio_get_fw_stats(void *arg); static int lio_get_set_intr_coalesce(SYSCTL_HANDLER_ARGS); static int lio_get_intrmod_cfg(struct lio *lio, struct octeon_intrmod_cfg *intr_cfg); static int lio_get_ringparam(SYSCTL_HANDLER_ARGS); static int lio_set_ringparam(SYSCTL_HANDLER_ARGS); static int lio_get_channels(SYSCTL_HANDLER_ARGS); static int lio_set_channels(SYSCTL_HANDLER_ARGS); static int lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs); struct lio_intrmod_context { int octeon_id; volatile int cond; int status; }; struct lio_intrmod_resp { uint64_t rh; struct octeon_intrmod_cfg intrmod; uint64_t status; }; static int lio_send_queue_count_update(struct ifnet *ifp, uint32_t num_queues) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL; nctrl.ncmd.s.param1 = num_queues; nctrl.ncmd.s.param2 = num_queues; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Failed to send Queue reset command (ret: 0x%x)\n", ret); return (-1); } return (0); } /* Add sysctl variables to the system, one per statistic. 
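* The nodes are created on the device's own sysctl context
* (device_get_sysctl_ctx() below), so newbus destroys them
* automatically when the device detaches.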
*/ void lio_add_hw_stats(struct lio *lio) { struct octeon_device *oct_dev = lio->oct_dev; device_t dev = oct_dev->device; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct sysctl_oid *stat_node, *queue_node, *root_node; struct sysctl_oid_list *stat_list, *queue_list, *root_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; callout_reset(&lio->stats_timer, lio_ms_to_ticks(lio->stats_interval), lio_get_fw_stats, lio); SYSCTL_ADD_STRING(ctx, child, OID_AUTO, "fwversion", CTLFLAG_RD, oct_dev->fw_info.lio_firmware_version, 0, "Firmware version"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "stats_interval", CTLTYPE_INT | CTLFLAG_RW, lio, 0, lio_set_stats_interval, "I", "Set stats update interval in milliseconds"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "link_state_changes", CTLFLAG_RD, &lio->link_changes, "Link Change Counter"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eeprom-dump", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, lio, 0, lio_get_eeprom, "A", "EEPROM information"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, lio, 0, lio_get_set_pauseparam, "I", "Get and set pause parameters.\n" \ "0 - off\n" \ "1 - rx pause\n" \ "2 - tx pause\n" \ "3 - rx and tx pause"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "register-dump", CTLTYPE_STRING | CTLFLAG_RD, lio, 0, lio_get_regs, "A", "Dump registers in raw format"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fwmsglevel", CTLTYPE_INT | CTLFLAG_RW, lio, 0, lio_get_set_fwmsglevel, "I", "Get or set firmware message level"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxq_descriptors", CTLTYPE_INT | CTLFLAG_RW, lio, LIO_SET_RING_RX, lio_set_ringparam, "I", "Set RX ring parameter"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txq_descriptors", CTLTYPE_INT | CTLFLAG_RW, lio, LIO_SET_RING_TX, lio_set_ringparam, "I", "Set TX ring parameter"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "max_rxq_descriptors", CTLTYPE_INT | CTLFLAG_RD, lio, LIO_SET_RING_RX, lio_get_ringparam, "I", "Max RX descriptors"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "max_txq_descriptors", CTLTYPE_INT | CTLFLAG_RD, lio, LIO_SET_RING_TX, lio_get_ringparam, "I", "Max TX descriptors"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "active_queues", CTLTYPE_INT | CTLFLAG_RW, lio, 0, lio_set_channels, "I", "Set channels information"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "max_queues", CTLTYPE_INT | CTLFLAG_RD, lio, 0, lio_get_channels, "I", "Get channels information"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_budget", CTLFLAG_RW, &oct_dev->tx_budget, 0, "TX process pkt budget"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_budget", CTLFLAG_RW, &oct_dev->rx_budget, 0, "RX process pkt budget"); /* IRQ Coalescing Parameters */ root_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "coalesce", CTLFLAG_RD, NULL, "Get and Set Coalesce"); root_list = SYSCTL_CHILDREN(root_node); if (lio_get_intrmod_cfg(lio, &lio->intrmod_cfg)) lio_dev_info(oct_dev, "Coalescing driver update failed!\n"); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "sample-interval", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RATE_SAMPLE_INTERVAL, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "tx-frame-high", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_TX_MAX_COALESCED_FRAMES_HIGH, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-frame-high", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_MAX_COALESCED_FRAMES_HIGH,
lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-usecs-high", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_COALESCE_USECS_HIGH, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "pkt-rate-high", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_PKT_RATE_HIGH, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "tx-frame-low", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_TX_MAX_COALESCED_FRAMES_LOW, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-frame-low", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_MAX_COALESCED_FRAMES_LOW, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-usecs-low", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_COALESCE_USECS_LOW, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "pkt-rate-low", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_PKT_RATE_LOW, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "tx-frames", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_TX_MAX_COALESCED_FRAMES, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-frames", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_MAX_COALESCED_FRAMES, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-usecs", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_COALESCE_USECS, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "adaptive-tx", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_USE_ADAPTIVE_TX_COALESCE, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "adaptive-rx", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_USE_ADAPTIVE_RX_COALESCE, lio_get_set_intr_coalesce, "QU", NULL); /* Root Node of all the Stats */ root_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "Root Node of all the Stats"); root_list = SYSCTL_CHILDREN(root_node); /* Firmware Tx Stats */ stat_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, "fwtx",CTLFLAG_RD, NULL, "Firmware Tx Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_total_sent", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_total_sent, "Firmware Total Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_total_fwd", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_total_fwd, "Firmware Total Packets Forwarded"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_total_fwd_bytes", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_total_fwd_bytes, "Firmware Total Bytes Forwarded"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_err_pko", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_err_pko, "Firmware Tx PKO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_err_pki", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_err_pki, "Firmware Tx PKI Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_err_link", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_err_link, "Firmware Tx Link Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_err_drop", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_err_drop, "Firmware Tx Packets Dropped"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fw_tso", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_tso, "Firmware Tx TSO"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_tso_packets", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_tso_fwd, 
"Firmware Tx TSO Packets"); //SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_tso_err", CTLFLAG_RD, //&oct_dev->link_stats.fromhost.fw_tso_err, //"Firmware Tx TSO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_vxlan", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_tx_vxlan, "Firmware Tx VXLAN"); /* MAC Tx Stats */ stat_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, "mactx", CTLFLAG_RD, NULL, "MAC Tx Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_total_pkts", CTLFLAG_RD, &oct_dev->link_stats.fromhost.total_pkts_sent, "Link-Level Total Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_total_bytes", CTLFLAG_RD, &oct_dev->link_stats.fromhost.total_bytes_sent, "Link-Level Total Bytes Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_mcast_pkts", CTLFLAG_RD, &oct_dev->link_stats.fromhost.mcast_pkts_sent, "Link-Level Multicast Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_bcast_pkts", CTLFLAG_RD, &oct_dev->link_stats.fromhost.bcast_pkts_sent, "Link-Level Broadcast Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_ctl_packets", CTLFLAG_RD, &oct_dev->link_stats.fromhost.ctl_sent, "Link-Level Control Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_total_collisions", CTLFLAG_RD, &oct_dev->link_stats.fromhost.total_collisions, "Link-Level Tx Total Collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_one_collision", CTLFLAG_RD, &oct_dev->link_stats.fromhost.one_collision_sent, "Link-Level Tx One Collision Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_multi_collison", CTLFLAG_RD, &oct_dev->link_stats.fromhost.multi_collision_sent, "Link-Level Tx Multi-Collision Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_max_collision_fail", CTLFLAG_RD, &oct_dev->link_stats.fromhost.max_collision_fail, "Link-Level Tx Max Collision Failed"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_max_deferal_fail", CTLFLAG_RD, &oct_dev->link_stats.fromhost.max_deferral_fail, "Link-Level Tx Max Deferral Failed"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_fifo_err", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fifo_err, "Link-Level Tx FIFO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_runts", CTLFLAG_RD, &oct_dev->link_stats.fromhost.runts, "Link-Level Tx Runts"); /* Firmware Rx Stats */ stat_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, "fwrx", CTLFLAG_RD, NULL, "Firmware Rx Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_total_rcvd", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_total_rcvd, "Firmware Total Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_total_fwd", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_total_fwd, "Firmware Total Packets Forwarded"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_jabber_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.jabber_err, "Firmware Rx Jabber Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_l2_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.l2_err, "Firmware Rx L2 Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frame_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.frame_err, "Firmware Rx Frame Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_err_pko", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_err_pko, "Firmware Rx PKO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_err_link", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_err_link, "Firmware Rx Link Errors"); 
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_err_drop", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_err_drop, "Firmware Rx Dropped"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_vxlan", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_rx_vxlan, "Firmware Rx VXLAN"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_vxlan_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_rx_vxlan_err, "Firmware Rx VXLAN Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_pkts", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_pkts, "Firmware Rx LRO Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_bytes", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_octs, "Firmware Rx LRO Bytes"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_total_lro", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_total_lro, "Firmware Rx Total LRO"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts, "Firmware Rx LRO Aborts"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts_port", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts_port, "Firmware Rx LRO Aborts Port"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts_seq", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts_seq, "Firmware Rx LRO Aborts Sequence"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts_tsval", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts_tsval, "Firmware Rx LRO Aborts tsval"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts_timer", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts_timer, "Firmware Rx LRO Aborts Timer"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_fwd_rate", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fwd_rate, "Firmware Rx Packets Forward Rate"); /* MAC Rx Stats */ stat_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, "macrx", CTLFLAG_RD, NULL, "MAC Rx Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_total_rcvd", CTLFLAG_RD, &oct_dev->link_stats.fromwire.total_rcvd, "Link-Level Total Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_bytes", CTLFLAG_RD, &oct_dev->link_stats.fromwire.bytes_rcvd, "Link-Level Total Bytes Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_total_bcst", CTLFLAG_RD, &oct_dev->link_stats.fromwire.total_bcst, "Link-Level Total Broadcast"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_total_mcst", CTLFLAG_RD, &oct_dev->link_stats.fromwire.total_mcst, "Link-Level Total Multicast"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_runts", CTLFLAG_RD, &oct_dev->link_stats.fromwire.runts, "Link-Level Rx Runts"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_ctl_packets", CTLFLAG_RD, &oct_dev->link_stats.fromwire.ctl_rcvd, "Link-Level Rx Control Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_fifo_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fifo_err, "Link-Level Rx FIFO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_dma_drop", CTLFLAG_RD, &oct_dev->link_stats.fromwire.dmac_drop, "Link-Level Rx DMA Dropped"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_fcs_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fcs_err, "Link-Level Rx FCS Errors"); /* TX */ for (int i = 0; i < oct_dev->num_iqs; i++) { if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) continue; snprintf(namebuf, QUEUE_NAME_LEN, "tx-%d", i); queue_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Input Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); /* 
packets to network port */ /* # of packets tx to network */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_done, "Number of Packets Tx to Network"); /* # of bytes tx to network */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_tot_bytes, "Number of Bytes Tx to Network"); /* # of packets dropped */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_dropped, "Number of Tx Packets Dropped"); /* # of tx fails due to queue full */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "iq_busy", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_iq_busy, "Number of Tx Fails Due to Queue Full"); /* scatter gather entries sent */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "sgentry_sent", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.sgentry_sent, "Scatter Gather Entries Sent"); /* instruction to firmware: data and control */ /* # of instructions to the queue */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_instr_posted", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.instr_posted, "Number of Instructions to The Queue"); /* # of instructions processed */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_instr_processed", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.instr_processed, "Number of Instructions Processed"); /* # of instructions could not be processed */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_instr_dropped", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.instr_dropped, "Number of Instructions Dropped"); /* bytes sent through the queue */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_bytes_sent", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.bytes_sent, "Bytes Sent Through The Queue"); /* tso request */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_gso, "TSO Request"); /* vxlan request */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "vxlan", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_vxlan, "VXLAN Request"); /* txq restart */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "txq_restart", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_restart, "TxQ Restart"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_fail", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_dmamap_fail, "TxQ DMA Map Failed"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.mbuf_defrag_failed, "TxQ defrag Failed"); } /* RX */ for (int i = 0; i < oct_dev->num_oqs; i++) { if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) continue; snprintf(namebuf, QUEUE_NAME_LEN, "rx-%d", i); queue_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Output Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); /* packets send to TCP/IP network stack */ /* # of packets to network stack */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_pkts_received, "Number of Packets to Network Stack"); /* # of bytes to network stack */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_bytes_received, "Number of Bytes to Network Stack"); /* # of packets dropped */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped_nomem", CTLFLAG_RD, &oct_dev->droq[i]->stats.dropped_nomem, "Packets Dropped Due to No Memory"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped_toomany", CTLFLAG_RD, &oct_dev->droq[i]->stats.dropped_toomany, "Packets dropped, Too Many Pkts to Process"); 
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_dropped", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_dropped, "Packets Dropped due to Receive path failures"); /* control and data path */ /* # packets sent to stack from this queue. */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_pkts_received", CTLFLAG_RD, &oct_dev->droq[i]->stats.pkts_received, "Number of Packets Received"); /* # Bytes sent to stack from this queue. */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_bytes_received", CTLFLAG_RD, &oct_dev->droq[i]->stats.bytes_received, "Number of Bytes Received"); /* Packets dropped due to no dispatch function. */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_dropped_nodispatch", CTLFLAG_RD, &oct_dev->droq[i]->stats.dropped_nodispatch, "Packets Dropped, No Dispatch Function"); /* Rx VXLAN */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "vxlan", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_vxlan, "Rx VXLAN"); /* # failures of lio_recv_buffer_alloc */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "buffer_alloc_failure", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_alloc_failure, "Number of Failures of lio_recv_buffer_alloc"); } } static int lio_get_eeprom(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct_dev = lio->oct_dev; struct lio_board_info *board_info; char buf[512]; board_info = (struct lio_board_info *)(&oct_dev->boardinfo); if (oct_dev->uboot_len == 0) - sprintf(buf, "boardname:%s serialnum:%s maj:%ld min:%ld", + sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld", board_info->name, board_info->serial_number, - board_info->major, board_info->minor); + LIO_CAST64(board_info->major), + LIO_CAST64(board_info->minor)); else { - sprintf(buf, "boardname:%s serialnum:%s maj:%ld min:%ld\n%s", + sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n%s", board_info->name, board_info->serial_number, - board_info->major, board_info->minor, + LIO_CAST64(board_info->major), + LIO_CAST64(board_info->minor), &oct_dev->uboot_version[oct_dev->uboot_sidx]); } return (sysctl_handle_string(oidp, buf, strlen(buf), req)); } /* * Get and set pause parameters or flow control using sysctl: * 0 - off * 1 - rx pause * 2 - tx pause * 3 - full */ static int lio_get_set_pauseparam(SYSCTL_HANDLER_ARGS) { /* Notes: Not supporting any auto negotiation in these drivers. 
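* Flow control is toggled through the "fc" sysctl declared in
* lio_add_hw_stats(); e.g., assuming the first device unit,
*     sysctl dev.lio.0.fc=3
* requests pause in both directions and
*     sysctl dev.lio.0.fc=0
* turns it off again.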
*/ struct lio_ctrl_pkt nctrl; struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; struct octeon_link_info *linfo = &lio->linfo; int err, new_pause = LIO_OFF_PAUSE, old_pause = LIO_OFF_PAUSE; int ret = 0; if (oct->chip_id != LIO_CN23XX_PF_VID) return (EINVAL); if (oct->rx_pause) old_pause |= LIO_RX_PAUSE; if (oct->tx_pause) old_pause |= LIO_TX_PAUSE; new_pause = old_pause; err = sysctl_handle_int(oidp, &new_pause, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_pause == new_pause) return (0); if (linfo->link.s.duplex == 0) { /* no flow control for half duplex */ if (new_pause) return (EINVAL); } bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_SET_FLOW_CTL; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; if (new_pause & LIO_RX_PAUSE) { /* enable rx pause */ nctrl.ncmd.s.param1 = 1; } else { /* disable rx pause */ nctrl.ncmd.s.param1 = 0; } if (new_pause & LIO_TX_PAUSE) { /* enable tx pause */ nctrl.ncmd.s.param2 = 1; } else { /* disable tx pause */ nctrl.ncmd.s.param2 = 0; } ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Failed to set pause parameter\n"); return (EINVAL); } oct->rx_pause = new_pause & LIO_RX_PAUSE; oct->tx_pause = new_pause & LIO_TX_PAUSE; return (0); } /* Return register dump user app. */ static int lio_get_regs(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; struct ifnet *ifp = lio->ifp; char *regbuf; int error = EINVAL, len = 0; if (!(if_getflags(ifp) & IFF_DEBUG)) { char debug_info[30] = "Debugging is disabled"; return (sysctl_handle_string(oidp, debug_info, strlen(debug_info), req)); } regbuf = malloc(sizeof(char) * LIO_REGDUMP_LEN_XXXX, M_DEVBUF, M_WAITOK | M_ZERO); if (regbuf == NULL) return (error); switch (oct->chip_id) { case LIO_CN23XX_PF_VID: len += lio_cn23xx_pf_read_csr_reg(regbuf, oct); break; default: len += sprintf(regbuf, "%s Unknown chipid: %d\n", __func__, oct->chip_id); } error = sysctl_handle_string(oidp, regbuf, len, req); free(regbuf, M_DEVBUF); return (error); } static int lio_cn23xx_pf_read_csr_reg(char *s, struct octeon_device *oct) { uint32_t reg; int i, len = 0; uint8_t pf_num = oct->pf_num; /* PCI Window Registers */ len += sprintf(s + len, "\t Octeon CSR Registers\n\n"); /* 0x29030 or 0x29040 */ reg = LIO_CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num); - len += sprintf(s + len, "[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016lx\n", + len += sprintf(s + len, "[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n", reg, oct->pcie_port, oct->pf_num, - lio_read_csr64(oct, reg)); + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x27080 or 0x27090 */ reg = LIO_CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); - len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016lx\n", + len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n", reg, oct->pcie_port, oct->pf_num, - lio_read_csr64(oct, reg)); + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x27000 or 0x27010 */ reg = LIO_CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); - len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016lx\n", + len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n", reg, oct->pcie_port, oct->pf_num, - lio_read_csr64(oct, reg)); + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29120 */ reg = 0x29120; - len += sprintf(s + len, "[%08x] (SLI_PKT_MEM_CTL): %016lx\n", reg, - lio_read_csr64(oct, 
reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg, + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x27300 */ reg = 0x27300 + oct->pcie_port * LIO_CN23XX_MAC_INT_OFFSET + (oct->pf_num) * LIO_CN23XX_PF_INT_OFFSET; - len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016lx\n", + len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg, oct->pcie_port, oct->pf_num, - lio_read_csr64(oct, reg)); + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x27200 */ reg = 0x27200 + oct->pcie_port * LIO_CN23XX_MAC_INT_OFFSET + (oct->pf_num) * LIO_CN23XX_PF_INT_OFFSET; - len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016lx\n", + len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n", reg, oct->pcie_port, oct->pf_num, - lio_read_csr64(oct, reg)); + LIO_CAST64(lio_read_csr64(oct, reg))); /* 29130 */ reg = LIO_CN23XX_SLI_PKT_CNT_INT; - len += sprintf(s + len, "[%08x] (SLI_PKT_CNT_INT): %016lx\n", reg, - lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg, + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29140 */ reg = LIO_CN23XX_SLI_PKT_TIME_INT; - len += sprintf(s + len, "[%08x] (SLI_PKT_TIME_INT): %016lx\n", reg, - lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg, + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29160 */ reg = 0x29160; - len += sprintf(s + len, "[%08x] (SLI_PKT_INT): %016lx\n", reg, - lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_INT): %016llx\n", reg, + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29180 */ reg = LIO_CN23XX_SLI_OQ_WMARK; - len += sprintf(s + len, "[%08x] (SLI_PKT_OUTPUT_WMARK): %016lx\n", - reg, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n", + reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x291E0 */ reg = LIO_CN23XX_SLI_PKT_IOQ_RING_RST; - len += sprintf(s + len, "[%08x] (SLI_PKT_RING_RST): %016lx\n", reg, - lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_RING_RST): %016llx\n", reg, + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29210 */ reg = LIO_CN23XX_SLI_GBL_CONTROL; - len += sprintf(s + len, "[%08x] (SLI_PKT_GBL_CONTROL): %016lx\n", reg, - lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg, + LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29220 */ reg = 0x29220; - len += sprintf(s + len, "[%08x] (SLI_PKT_BIST_STATUS): %016lx\n", - reg, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_BIST_STATUS): %016llx\n", + reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* PF only */ if (pf_num == 0) { /* 0x29260 */ reg = LIO_CN23XX_SLI_OUT_BP_EN_W1S; - len += sprintf(s + len, "[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016lx\n", - reg, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n", + reg, LIO_CAST64(lio_read_csr64(oct, reg))); } else if (pf_num == 1) { /* 0x29270 */ reg = LIO_CN23XX_SLI_OUT_BP_EN2_W1S; - len += sprintf(s + len, "[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016lx\n", - reg, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n", + reg, LIO_CAST64(lio_read_csr64(oct, reg))); } for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_BUFF_INFO_SIZE(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_OUT_SIZE): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", + reg, i, 
LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10040 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(i); - len += sprintf(s + len, "[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10080 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_PKTS_CREDIT(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10090 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_SIZE(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10050 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_PKT_CONTROL(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10070 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_BASE_ADDR64(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_BADDR): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x100a0 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_INT_LEVELS): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x100b0 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_PKTS_SENT(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_CNTS): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_CNTS): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x100c0 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = 0x100c0 + i * LIO_CN23XX_OQ_OFFSET; - len += sprintf(s + len, "[%08x] (SLI_PKT%d_ERROR_INFO): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10000 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_PKT_CONTROL64(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_INPUT_CONTROL): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10010 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_BASE_ADDR64(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_BADDR): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10020 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_DOORBELL(i); - 
len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10030 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_SIZE(i); - len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10040 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(i); - len += sprintf(s + len, "[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016lx\n", - reg, i, lio_read_csr64(oct, reg)); + len += sprintf(s + len, "[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", + reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); return (len); } static int lio_get_ringparam(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint32_t rx_max_pending = 0, tx_max_pending = 0; int err; if (LIO_CN23XX_PF(oct)) { tx_max_pending = LIO_CN23XX_MAX_IQ_DESCRIPTORS; rx_max_pending = LIO_CN23XX_MAX_OQ_DESCRIPTORS; } switch (arg2) { case LIO_SET_RING_RX: err = sysctl_handle_int(oidp, &rx_max_pending, 0, req); break; case LIO_SET_RING_TX: err = sysctl_handle_int(oidp, &tx_max_pending, 0, req); break; } return (err); } static int lio_reset_queues(struct ifnet *ifp, uint32_t num_qs) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int i, update = 0; if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); /* * Disable the input and output queues now. No more packets will * arrive from Octeon. 
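* Once quiesced, every active droq and instruction queue is
* deleted, the device registers are reprogrammed, num_qs fresh
* queues are created via lio_setup_io_queues(), and, if the queue
* count changed, the firmware is told through
* lio_send_queue_count_update() (LIO_CMD_QUEUE_COUNT_CTL).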
static int lio_reset_queues(struct ifnet *ifp, uint32_t num_qs) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int i, update = 0; if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); /* * Disable the input and output queues now. No more packets will * arrive from Octeon. */ oct->fn_list.disable_io_queues(oct); if (num_qs != oct->num_iqs) update = 1; for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; lio_delete_droq(oct, i); } for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; lio_delete_instr_queue(oct, i); } if (oct->fn_list.setup_device_regs(oct)) { lio_dev_err(oct, "Failed to configure device registers\n"); return (-1); } if (lio_setup_io_queues(oct, 0, num_qs, num_qs)) { lio_dev_err(oct, "IO queues initialization failed\n"); return (-1); } if (update && lio_send_queue_count_update(ifp, num_qs)) return (-1); return (0); } static int lio_set_ringparam(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint32_t rx_count, rx_count_old, tx_count, tx_count_old; int err, stopped = 0; if (!LIO_CN23XX_PF(oct)) return (EINVAL); switch (arg2) { case LIO_SET_RING_RX: rx_count = rx_count_old = oct->droq[0]->max_count; err = sysctl_handle_int(oidp, &rx_count, 0, req); if ((err) || (req->newptr == NULL)) return (err); rx_count = min(max(rx_count, LIO_CN23XX_MIN_OQ_DESCRIPTORS), LIO_CN23XX_MAX_OQ_DESCRIPTORS); if (rx_count == rx_count_old) return (0); lio_ifstate_set(lio, LIO_IFSTATE_RESETTING); if (if_getdrvflags(lio->ifp) & IFF_DRV_RUNNING) { lio_stop(lio->ifp); stopped = 1; } /* Change RX DESCS count */ LIO_SET_NUM_RX_DESCS_NIC_IF(lio_get_conf(oct), lio->ifidx, rx_count); break; case LIO_SET_RING_TX: tx_count = tx_count_old = oct->instr_queue[0]->max_count; err = sysctl_handle_int(oidp, &tx_count, 0, req); if ((err) || (req->newptr == NULL)) return (err); tx_count = min(max(tx_count, LIO_CN23XX_MIN_IQ_DESCRIPTORS), LIO_CN23XX_MAX_IQ_DESCRIPTORS); if (tx_count == tx_count_old) return (0); lio_ifstate_set(lio, LIO_IFSTATE_RESETTING); if (if_getdrvflags(lio->ifp) & IFF_DRV_RUNNING) { lio_stop(lio->ifp); stopped = 1; } /* Change TX DESCS count */ LIO_SET_NUM_TX_DESCS_NIC_IF(lio_get_conf(oct), lio->ifidx, tx_count); break; } if (lio_reset_queues(lio->ifp, lio->linfo.num_txpciq)) goto err_lio_reset_queues; lio_irq_reallocate_irqs(oct, lio->linfo.num_txpciq); if (stopped) lio_open(lio); lio_ifstate_reset(lio, LIO_IFSTATE_RESETTING); return (0); err_lio_reset_queues: if (arg2 == LIO_SET_RING_RX && rx_count != rx_count_old) LIO_SET_NUM_RX_DESCS_NIC_IF(lio_get_conf(oct), lio->ifidx, rx_count_old); if (arg2 == LIO_SET_RING_TX && tx_count != tx_count_old) LIO_SET_NUM_TX_DESCS_NIC_IF(lio_get_conf(oct), lio->ifidx, tx_count_old); return (EINVAL); } static int lio_get_channels(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint32_t max_combined = 0; if (LIO_CN23XX_PF(oct)) max_combined = lio->linfo.num_txpciq; return (sysctl_handle_int(oidp, &max_combined, 0, req)); }
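lio_get_ringparam, lio_set_ringparam, and the handlers that follow all instantiate the stock FreeBSD sysctl idiom: copy the current value into a local, let sysctl_handle_int() export it (and, on a set, import the new value), and return early when req->newptr is NULL because the request was only a read. A minimal sketch of that skeleton; struct my_softc, MY_MIN, and MY_MAX are hypothetical names, not part of the driver:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

struct my_softc { int cur_val; };
#define MY_MIN	1
#define MY_MAX	64

static int
my_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct my_softc *sc = (struct my_softc *)arg1;
	int error, val;

	val = sc->cur_val;		/* export the current value */
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);		/* a read, or the copy failed */
	if (val < MY_MIN || val > MY_MAX)
		return (EINVAL);	/* validate before committing */
	sc->cur_val = val;		/* commit the new value */
	return (0);
}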
static int lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) { int i, num_msix_irqs = 0; if (!oct->msix_on) return (0); /* * Disable Octeon device interrupts now. No more interrupts will * arrive from Octeon until they are re-enabled below. */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); if (oct->msix_on) { if (LIO_CN23XX_PF(oct)) num_msix_irqs = oct->num_msix_irqs - 1; for (i = 0; i < num_msix_irqs; i++) { if (oct->ioq_vector[i].tag != NULL) { bus_teardown_intr(oct->device, oct->ioq_vector[i].msix_res, oct->ioq_vector[i].tag); oct->ioq_vector[i].tag = NULL; } if (oct->ioq_vector[i].msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->ioq_vector[i].vector, oct->ioq_vector[i].msix_res); oct->ioq_vector[i].msix_res = NULL; } } if (oct->tag != NULL) { bus_teardown_intr(oct->device, oct->msix_res, oct->tag); oct->tag = NULL; } if (oct->msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->aux_vector, oct->msix_res); oct->msix_res = NULL; } pci_release_msi(oct->device); } if (lio_setup_interrupt(oct, num_ioqs)) { lio_dev_info(oct, "Setup interrupt failed\n"); return (1); } /* Enable Octeon device interrupts */ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); return (0); } static int lio_set_channels(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint32_t combined_count, max_combined; int err, stopped = 0; if (strcmp(oct->fw_info.lio_firmware_version, "1.6.1") < 0) { lio_dev_err(oct, "Minimum firmware version required is 1.6.1\n"); return (EINVAL); } combined_count = oct->num_iqs; err = sysctl_handle_int(oidp, &combined_count, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (!combined_count) return (EINVAL); if (LIO_CN23XX_PF(oct)) { max_combined = lio->linfo.num_txpciq; } else { return (EINVAL); } if ((combined_count > max_combined) || (combined_count < 1)) return (EINVAL); if (combined_count == oct->num_iqs) return (0); lio_ifstate_set(lio, LIO_IFSTATE_RESETTING); if (if_getdrvflags(lio->ifp) & IFF_DRV_RUNNING) { lio_stop(lio->ifp); stopped = 1; } if (lio_reset_queues(lio->ifp, combined_count)) return (EINVAL); lio_irq_reallocate_irqs(oct, combined_count); if (stopped) lio_open(lio); lio_ifstate_reset(lio, LIO_IFSTATE_RESETTING); return (0); } static int lio_get_set_fwmsglevel(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct ifnet *ifp = lio->ifp; int err, new_msglvl = 0, old_msglvl = 0; if (lio_ifstate_check(lio, LIO_IFSTATE_RESETTING)) return (ENXIO); old_msglvl = new_msglvl = lio->msg_enable; err = sysctl_handle_int(oidp, &new_msglvl, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_msglvl == new_msglvl) return (0); if (new_msglvl ^ lio->msg_enable) { if (new_msglvl) err = lio_set_feature(ifp, LIO_CMD_VERBOSE_ENABLE, 0); else err = lio_set_feature(ifp, LIO_CMD_VERBOSE_DISABLE, 0); } lio->msg_enable = new_msglvl; return ((err) ? EINVAL : 0); }
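One nit in lio_set_channels above: the minimum-firmware gate uses strcmp() against "1.6.1", which orders dotted version strings lexicographically, so a hypothetical "1.10.0" would compare as older than "1.6.1". That happens to work for the single-digit components shipped so far. Should that ever change, a component-wise numeric comparison is the safer shape; a sketch under that assumption (version_cmp is a hypothetical helper, plain C, not part of the driver):

#include <stdlib.h>

/*
 * Compare dotted version strings numerically, component by component.
 * Returns <0, 0, or >0, strcmp-style.
 */
static int
version_cmp(const char *a, const char *b)
{
	char *ea, *eb;
	long na, nb;

	while (*a != '\0' || *b != '\0') {
		na = strtol(a, &ea, 10);
		nb = strtol(b, &eb, 10);
		if (ea == a && eb == b)
			break;		/* no digits left on either side */
		if (na != nb)
			return (na < nb ? -1 : 1);
		a = (*ea == '.') ? ea + 1 : ea;	/* skip the dot */
		b = (*eb == '.') ? eb + 1 : eb;
	}
	return (0);
}

With this shape, version_cmp("1.10.0", "1.6.1") is positive, where strcmp() would report the opposite ordering.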
static int lio_set_stats_interval(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; int err, new_time = 0, old_time = 0; old_time = new_time = lio->stats_interval; err = sysctl_handle_int(oidp, &new_time, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_time == new_time) return (0); lio->stats_interval = new_time; return (0); }
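lio_fw_stats_callback and the intrmod helpers below all ride the same soft-command round trip: allocate a command sized for the expected response, aim it at one of the interface's TX queues, attach a completion callback that owns freeing the command, and post it to the firmware. A condensed sketch of that lifecycle, reusing the calls exactly as the surrounding code does (it assumes the driver's usual headers; struct my_resp and MY_OPCODE are hypothetical placeholders):

static void
my_cmd_callback(struct octeon_device *oct, uint32_t status, void *ptr)
{
	struct lio_soft_command *sc = (struct lio_soft_command *)ptr;

	if (!status) {
		/* Response arrives as big-endian 8-byte words; swap first. */
		lio_swap_8B_data((uint64_t *)sc->virtrptr,
		    sizeof(struct my_resp) >> 3);
		/* ... consume the decoded response here ... */
	}
	lio_free_soft_command(oct, sc);	/* the callback owns the command */
}

static int
my_send_cmd(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	struct lio_soft_command *sc;

	/* datasize 0, rdatasize = response size, ctxsize 0 */
	sc = lio_alloc_soft_command(oct, 0, sizeof(struct my_resp), 0);
	if (sc == NULL)
		return (ENOMEM);
	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, MY_OPCODE,
	    0, 0, 0);
	sc->callback = my_cmd_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500;		/* milliseconds */
	if (lio_send_soft_command(oct, sc) == LIO_IQ_SEND_FAILED) {
		lio_free_soft_command(oct, sc);	/* never queued; free here */
		return (EINVAL);
	}
	return (0);
}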
static void lio_fw_stats_callback(struct octeon_device *oct_dev, uint32_t status, void *ptr) { struct lio_soft_command *sc = (struct lio_soft_command *)ptr; struct lio_fw_stats_resp *resp = (struct lio_fw_stats_resp *)sc->virtrptr; struct octeon_rx_stats *rsp_rstats = &resp->stats.fromwire; struct octeon_tx_stats *rsp_tstats = &resp->stats.fromhost; struct octeon_rx_stats *rstats = &oct_dev->link_stats.fromwire; struct octeon_tx_stats *tstats = &oct_dev->link_stats.fromhost; struct ifnet *ifp = oct_dev->props.ifp; struct lio *lio = if_getsoftc(ifp); if ((status != LIO_REQUEST_TIMEOUT) && !resp->status) { lio_swap_8B_data((uint64_t *)&resp->stats, (sizeof(struct octeon_link_stats)) >> 3); /* RX link-level stats */ rstats->total_rcvd = rsp_rstats->total_rcvd; rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; rstats->total_bcst = rsp_rstats->total_bcst; rstats->total_mcst = rsp_rstats->total_mcst; rstats->runts = rsp_rstats->runts; rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; /* Accounts for over/under-run of buffers */ rstats->fifo_err = rsp_rstats->fifo_err; rstats->dmac_drop = rsp_rstats->dmac_drop; rstats->fcs_err = rsp_rstats->fcs_err; rstats->jabber_err = rsp_rstats->jabber_err; rstats->l2_err = rsp_rstats->l2_err; rstats->frame_err = rsp_rstats->frame_err; /* RX firmware stats */ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; rstats->fw_err_pko = rsp_rstats->fw_err_pko; rstats->fw_err_link = rsp_rstats->fw_err_link; rstats->fw_err_drop = rsp_rstats->fw_err_drop; rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; /* Number of packets that are LROed */ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; /* Number of octets that are LROed */ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; /* Number of LRO packets formed */ rstats->fw_total_lro = rsp_rstats->fw_total_lro; /* Number of times LRO of a packet was aborted */ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; /* intrmod: packet forward rate */ rstats->fwd_rate = rsp_rstats->fwd_rate; /* TX link-level stats */ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; tstats->ctl_sent = rsp_tstats->ctl_sent; /* Packets sent after one collision */ tstats->one_collision_sent = rsp_tstats->one_collision_sent; /* Packets sent after multiple collisions */ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; /* Packets not sent due to max collisions */ tstats->max_collision_fail = rsp_tstats->max_collision_fail; /* Packets not sent due to max deferrals */ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; /* Accounts for over/under-run of buffers */ tstats->fifo_err = rsp_tstats->fifo_err; tstats->runts = rsp_tstats->runts; /* Total number of collisions detected */ tstats->total_collisions = rsp_tstats->total_collisions; /* firmware stats */ tstats->fw_total_sent = rsp_tstats->fw_total_sent; tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; tstats->fw_err_pko = rsp_tstats->fw_err_pko; tstats->fw_err_pki = rsp_tstats->fw_err_pki; tstats->fw_err_link = rsp_tstats->fw_err_link; tstats->fw_err_drop = rsp_tstats->fw_err_drop; tstats->fw_tso = rsp_tstats->fw_tso; tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; tstats->fw_err_tso = rsp_tstats->fw_err_tso; tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; } lio_free_soft_command(oct_dev, sc); callout_schedule(&lio->stats_timer, lio_ms_to_ticks(lio->stats_interval)); } /* Fetch port statistics from the firmware */ static void lio_get_fw_stats(void *arg) { struct lio *lio = arg; struct octeon_device *oct_dev = lio->oct_dev; struct lio_soft_command *sc; struct lio_fw_stats_resp *resp; int retval; if (callout_pending(&lio->stats_timer) || callout_active(&lio->stats_timer) == 0) return; /* Alloc soft command */ sc = lio_alloc_soft_command(oct_dev, 0, sizeof(struct lio_fw_stats_resp), 0); if (sc == NULL) goto alloc_sc_failed; resp = (struct lio_fw_stats_resp *)sc->virtrptr; bzero(resp, sizeof(struct lio_fw_stats_resp)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_PORT_STATS, 0, 0, 0); sc->callback = lio_fw_stats_callback; sc->callback_arg = sc; sc->wait_time = 500; /* in milliseconds */ retval = lio_send_soft_command(oct_dev, sc); if (retval == LIO_IQ_SEND_FAILED) goto send_sc_failed; return; send_sc_failed: lio_free_soft_command(oct_dev, sc); alloc_sc_failed: callout_schedule(&lio->stats_timer, lio_ms_to_ticks(lio->stats_interval)); } /* Callback function for intrmod */ static void lio_get_intrmod_callback(struct octeon_device *oct_dev, uint32_t status, void *ptr) { struct lio_soft_command *sc = (struct lio_soft_command *)ptr; struct ifnet *ifp = oct_dev->props.ifp; struct lio *lio = if_getsoftc(ifp); struct lio_intrmod_resp *resp; if (status) { lio_dev_err(oct_dev, "Failed to get intrmod\n"); } else { resp = (struct lio_intrmod_resp *)sc->virtrptr; lio_swap_8B_data((uint64_t *)&resp->intrmod, (sizeof(struct octeon_intrmod_cfg)) / 8); memcpy(&lio->intrmod_cfg, &resp->intrmod, sizeof(struct octeon_intrmod_cfg)); } lio_free_soft_command(oct_dev, sc); } /* Get interrupt moderation parameters */ static int lio_get_intrmod_cfg(struct lio *lio, struct octeon_intrmod_cfg *intr_cfg) { struct lio_soft_command *sc; struct lio_intrmod_resp *resp; struct octeon_device *oct_dev = lio->oct_dev; int retval; /* Alloc soft command */ sc = lio_alloc_soft_command(oct_dev, 0, sizeof(struct lio_intrmod_resp), 0); if (sc == NULL) return (ENOMEM); resp = (struct lio_intrmod_resp *)sc->virtrptr; bzero(resp, sizeof(struct lio_intrmod_resp)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); sc->callback = lio_get_intrmod_callback; sc->callback_arg = sc; sc->wait_time = 1000; retval = lio_send_soft_command(oct_dev, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_free_soft_command(oct_dev, sc); return (EINVAL); } return (0); } static void lio_set_intrmod_callback(struct octeon_device *oct_dev, uint32_t status, void *ptr) { struct lio_soft_command *sc = (struct lio_soft_command *)ptr; struct lio_intrmod_context *ctx; ctx = (struct lio_intrmod_context *)sc->ctxptr; ctx->status = status; ctx->cond = 1; /* * This barrier is required to be sure that the response has been * written fully before waking
up the handler */ wmb(); } /* Configure interrupt moderation parameters */ static int lio_set_intrmod_cfg(struct lio *lio, struct octeon_intrmod_cfg *intr_cfg) { struct lio_soft_command *sc; struct lio_intrmod_context *ctx; struct octeon_intrmod_cfg *cfg; struct octeon_device *oct_dev = lio->oct_dev; int retval; /* Alloc soft command */ sc = lio_alloc_soft_command(oct_dev, sizeof(struct octeon_intrmod_cfg), 0, sizeof(struct lio_intrmod_context)); if (sc == NULL) return (ENOMEM); ctx = (struct lio_intrmod_context *)sc->ctxptr; ctx->cond = 0; ctx->octeon_id = lio_get_device_id(oct_dev); cfg = (struct octeon_intrmod_cfg *)sc->virtdptr; memcpy(cfg, intr_cfg, sizeof(struct octeon_intrmod_cfg)); lio_swap_8B_data((uint64_t *)cfg, (sizeof(struct octeon_intrmod_cfg)) / 8); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); sc->callback = lio_set_intrmod_callback; sc->callback_arg = sc; sc->wait_time = 1000; retval = lio_send_soft_command(oct_dev, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_free_soft_command(oct_dev, sc); return (EINVAL); } /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ lio_sleep_cond(oct_dev, &ctx->cond); retval = ctx->status; if (retval) lio_dev_err(oct_dev, "intrmod config failed. Status: %llx\n", LIO_CAST64(retval)); else - lio_dev_info(oct_dev, "Rx-Adaptive Interrupt moderation enabled:%lx\n", - intr_cfg->rx_enable); + lio_dev_info(oct_dev, "Rx-Adaptive Interrupt moderation enabled:%llx\n", + LIO_CAST64(intr_cfg->rx_enable)); lio_free_soft_command(oct_dev, sc); return ((retval) ? ETIMEDOUT : 0); } static int lio_intrmod_cfg_rx_intrcnt(struct lio *lio, struct octeon_intrmod_cfg *intrmod, uint32_t rx_max_frames) { struct octeon_device *oct = lio->oct_dev; uint32_t rx_max_coalesced_frames; /* Config Cnt based interrupt values */ switch (oct->chip_id) { case LIO_CN23XX_PF_VID:{ int q_no; if (!rx_max_frames) rx_max_coalesced_frames = intrmod->rx_frames; else rx_max_coalesced_frames = rx_max_frames; for (q_no = 0; q_no < oct->num_oqs; q_no++) { q_no += oct->sriov_info.pf_srn; lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), (lio_read_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & (0x3fffff00000000UL)) | (rx_max_coalesced_frames - 1)); /* consider setting resend bit */ } intrmod->rx_frames = rx_max_coalesced_frames; oct->rx_max_coalesced_frames = rx_max_coalesced_frames; break; } default: return (EINVAL); } return (0); } static int lio_intrmod_cfg_rx_intrtime(struct lio *lio, struct octeon_intrmod_cfg *intrmod, uint32_t rx_usecs) { struct octeon_device *oct = lio->oct_dev; uint32_t rx_coalesce_usecs; /* Config Time based interrupt values */ switch (oct->chip_id) { case LIO_CN23XX_PF_VID:{ uint64_t time_threshold; int q_no; if (!rx_usecs) rx_coalesce_usecs = intrmod->rx_usecs; else rx_coalesce_usecs = rx_usecs; time_threshold = lio_cn23xx_pf_get_oq_ticks(oct, rx_coalesce_usecs); for (q_no = 0; q_no < oct->num_oqs; q_no++) { q_no += oct->sriov_info.pf_srn; lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), (intrmod->rx_frames | ((uint64_t)time_threshold << 32))); /* consider writing to resend bit here */ } intrmod->rx_usecs = rx_coalesce_usecs; oct->rx_coalesce_usecs = rx_coalesce_usecs; break; } default: return (EINVAL); } return (0); } static int lio_intrmod_cfg_tx_intrcnt(struct lio *lio, struct octeon_intrmod_cfg *intrmod, uint32_t tx_max_frames) { struct octeon_device *oct = lio->oct_dev; uint64_t val; uint32_t 
iq_intr_pkt; uint32_t inst_cnt_reg; /* Config Cnt based interrupt values */ switch (oct->chip_id) { case LIO_CN23XX_PF_VID:{ int q_no; if (!tx_max_frames) iq_intr_pkt = LIO_CN23XX_DEF_IQ_INTR_THRESHOLD & LIO_CN23XX_PKT_IN_DONE_WMARK_MASK; else iq_intr_pkt = tx_max_frames & LIO_CN23XX_PKT_IN_DONE_WMARK_MASK; for (q_no = 0; q_no < oct->num_iqs; q_no++) { inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg; val = lio_read_csr64(oct, inst_cnt_reg); /* * Clear the wmark and count fields; we don't want to * write the count back. */ val = (val & 0xFFFF000000000000ULL) | ((uint64_t)(iq_intr_pkt - 1) << LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS); lio_write_csr64(oct, inst_cnt_reg, val); /* consider setting resend bit */ } intrmod->tx_frames = iq_intr_pkt; oct->tx_max_coalesced_frames = iq_intr_pkt; break; } default: return (EINVAL); } return (0); }
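A caution on lio_intrmod_cfg_rx_intrcnt() and lio_intrmod_cfg_rx_intrtime() above (the pattern is inherited from the Linux progenitor): the per-queue loops do q_no += oct->sriov_info.pf_srn inside the body, so the loop counter itself is shifted each pass, and whenever pf_srn is nonzero the loop walks a strided subset of rings and terminates early rather than covering the PF's num_oqs consecutive rings. Assuming the intent is rings pf_srn through pf_srn + num_oqs - 1, a separate register index keeps the trip count independent. The sketch below (my_set_oq_int_levels is hypothetical) also spells out the SLI_OQ_PKT_INT_LEVELS packing the writes above rely on: time threshold in bits 63:32, packet count in bits 31:0:

/*
 * Sketch only: program the per-ring interrupt thresholds without
 * disturbing the loop counter.  Assumes this PF's output rings are
 * the contiguous block starting at oct->sriov_info.pf_srn.
 */
static void
my_set_oq_int_levels(struct octeon_device *oct, uint32_t pkt_thresh,
    uint64_t time_thresh)
{
	uint64_t level;
	uint32_t i, ring;

	for (i = 0; i < oct->num_oqs; i++) {
		ring = oct->sriov_info.pf_srn + i;
		/* [63:32] = OQ tick threshold, [31:0] = packet threshold */
		level = (time_thresh << 32) | pkt_thresh;
		lio_write_csr64(oct,
		    LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(ring), level);
	}
}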
static int lio_get_set_intr_coalesce(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint64_t new_val = 0, old_val = 0; uint32_t rx_coalesce_usecs = 0; uint32_t rx_max_coalesced_frames = 0; uint32_t tx_coalesce_usecs = 0; int err, ret; switch (arg2) { case LIO_USE_ADAPTIVE_RX_COALESCE: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_enable; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); lio->intrmod_cfg.rx_enable = new_val ? 1 : 0; break; case LIO_USE_ADAPTIVE_TX_COALESCE: if (lio->intrmod_cfg.tx_enable) new_val = old_val = lio->intrmod_cfg.tx_enable; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); lio->intrmod_cfg.tx_enable = new_val ? 1 : 0; break; case LIO_RX_COALESCE_USECS: if (!lio->intrmod_cfg.rx_enable) new_val = old_val = oct->rx_coalesce_usecs; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); rx_coalesce_usecs = new_val; break; case LIO_RX_MAX_COALESCED_FRAMES: if (!lio->intrmod_cfg.rx_enable) new_val = old_val = oct->rx_max_coalesced_frames; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); rx_max_coalesced_frames = new_val; break; case LIO_TX_MAX_COALESCED_FRAMES: if (!lio->intrmod_cfg.tx_enable) new_val = old_val = oct->tx_max_coalesced_frames; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); tx_coalesce_usecs = new_val; break; case LIO_PKT_RATE_LOW: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.minpkt_ratethr; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable || lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.minpkt_ratethr = new_val; break; case LIO_RX_COALESCE_USECS_LOW: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_mintmr_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable) lio->intrmod_cfg.rx_mintmr_trigger = new_val; break; case LIO_RX_MAX_COALESCED_FRAMES_LOW: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_mincnt_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable) lio->intrmod_cfg.rx_mincnt_trigger = new_val; break; case LIO_TX_MAX_COALESCED_FRAMES_LOW: if (lio->intrmod_cfg.tx_enable) new_val = old_val = lio->intrmod_cfg.tx_mincnt_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.tx_mincnt_trigger = new_val; break; case LIO_PKT_RATE_HIGH: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.maxpkt_ratethr; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable || lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.maxpkt_ratethr = new_val; break; case LIO_RX_COALESCE_USECS_HIGH: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_maxtmr_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable) lio->intrmod_cfg.rx_maxtmr_trigger = new_val; break; case LIO_RX_MAX_COALESCED_FRAMES_HIGH: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_maxcnt_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable) lio->intrmod_cfg.rx_maxcnt_trigger = new_val; break; case LIO_TX_MAX_COALESCED_FRAMES_HIGH: if (lio->intrmod_cfg.tx_enable) new_val = old_val = lio->intrmod_cfg.tx_maxcnt_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.tx_maxcnt_trigger = new_val; break; case LIO_RATE_SAMPLE_INTERVAL: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.check_intrvl; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable || lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.check_intrvl = new_val; break; default: return (EINVAL); } lio->intrmod_cfg.rx_usecs = LIO_GET_OQ_INTR_TIME_CFG(lio_get_conf(oct)); lio->intrmod_cfg.rx_frames = LIO_GET_OQ_INTR_PKT_CFG(lio_get_conf(oct)); lio->intrmod_cfg.tx_frames = LIO_GET_IQ_INTR_PKT_CFG(lio_get_conf(oct)); ret = lio_set_intrmod_cfg(lio, &lio->intrmod_cfg); if (ret) lio_dev_err(oct, "Interrupt coalescing update to firmware failed!\n"); if (!lio->intrmod_cfg.rx_enable) { if (!rx_coalesce_usecs) rx_coalesce_usecs = oct->rx_coalesce_usecs; if (!rx_max_coalesced_frames) rx_max_coalesced_frames = oct->rx_max_coalesced_frames; ret = lio_intrmod_cfg_rx_intrtime(lio, &lio->intrmod_cfg, rx_coalesce_usecs); if (ret) return (ret); ret = lio_intrmod_cfg_rx_intrcnt(lio, &lio->intrmod_cfg, rx_max_coalesced_frames); if (ret) return (ret); } else { oct->rx_coalesce_usecs = LIO_GET_OQ_INTR_TIME_CFG(lio_get_conf(oct)); oct->rx_max_coalesced_frames = LIO_GET_OQ_INTR_PKT_CFG(lio_get_conf(oct)); } if (!lio->intrmod_cfg.tx_enable) { if (!tx_coalesce_usecs) tx_coalesce_usecs = oct->tx_max_coalesced_frames; ret = lio_intrmod_cfg_tx_intrcnt(lio, &lio->intrmod_cfg, tx_coalesce_usecs); if (ret) return (ret); } else { oct->tx_max_coalesced_frames = LIO_GET_IQ_INTR_PKT_CFG(lio_get_conf(oct)); } return (0); } Index: head/sys/modules/Makefile =================================================================== --- head/sys/modules/Makefile (revision
324993) +++ head/sys/modules/Makefile (revision 324994) @@ -1,837 +1,837 @@ # $FreeBSD$ SYSDIR?=${SRCTOP}/sys .include "${SYSDIR}/conf/kern.opts.mk" SUBDIR_PARALLEL= # Modules that include binary-only blobs of microcode should be selectable by # MK_SOURCELESS_UCODE option (see below). .if defined(MODULES_OVERRIDE) && !defined(ALL_MODULES) SUBDIR=${MODULES_OVERRIDE} .else SUBDIR= \ ${_3dfx} \ ${_3dfx_linux} \ ${_aac} \ ${_aacraid} \ accf_data \ accf_dns \ accf_http \ acl_nfs4 \ acl_posix1e \ ${_acpi} \ ae \ ${_aesni} \ age \ ${_agp} \ aha \ ahci \ ${_aic} \ aic7xxx \ alc \ ale \ alq \ ${_amd_ecc_inject} \ ${_amdsbwd} \ ${_amdsmn} \ ${_amdtemp} \ amr \ ${_an} \ ${_aout} \ ${_apm} \ ${_arcmsr} \ ${_arcnet} \ ${_armv8crypto} \ ${_asmc} \ ata \ ath \ ath_dfs \ ath_hal \ ath_hal_ar5210 \ ath_hal_ar5211 \ ath_hal_ar5212 \ ath_hal_ar5416 \ ath_hal_ar9300 \ ath_main \ ath_rate \ ath_pci \ ${_autofs} \ ${_auxio} \ ${_bce} \ bfe \ bge \ bhnd \ ${_bxe} \ ${_bios} \ ${_bktr} \ ${_bm} \ bnxt \ bridgestp \ bwi \ bwn \ bwn_pci \ ${_bytgpio} \ cam \ ${_cardbus} \ ${_carp} \ cas \ ${_cbb} \ cc \ cd9660 \ cd9660_iconv \ ${_ce} \ ${_cfi} \ chacha20 \ ${_chromebook_platform} \ ${_ciss} \ cloudabi \ ${_cloudabi32} \ ${_cloudabi64} \ ${_cm} \ ${_cmx} \ ${_coff} \ ${_coretemp} \ ${_cp} \ ${_cpsw} \ ${_cpuctl} \ ${_cpufreq} \ ${_crypto} \ ${_cryptodev} \ ${_cs} \ ${_ctau} \ ctl \ ${_cxgb} \ ${_cxgbe} \ dc \ dcons \ dcons_crom \ de \ ${_dpms} \ ${_dpt} \ ${_drm} \ ${_drm2} \ dummynet \ ${_ed} \ ${_efirt} \ ${_elink} \ ${_em} \ ${_ena} \ ${_ep} \ ${_epic} \ esp \ ${_et} \ evdev \ ${_ex} \ ${_exca} \ ext2fs \ fdc \ fdescfs \ ${_fe} \ ${_ffec} \ filemon \ firewire \ firmware \ fuse \ ${_fxp} \ gem \ geom \ ${_glxiic} \ ${_glxsb} \ gpio \ hifn \ hme \ ${_hpt27xx} \ ${_hptiop} \ ${_hptmv} \ ${_hptnr} \ ${_hptrr} \ hwpmc \ ${_hwpmc_mips24k} \ ${_hwpmc_mips74k} \ ${_hyperv} \ i2c \ ${_ibcore} \ ${_ibcs2} \ ${_ichwd} \ ${_ida} \ if_bridge \ if_disc \ if_edsc \ ${_if_enc} \ if_epair \ ${_if_gif} \ ${_if_gre} \ ${_if_me} \ if_lagg \ ${_if_ndis} \ ${_if_stf} \ if_tap \ if_tun \ if_vlan \ if_vxlan \ ${_iir} \ imgact_binmisc \ ${_intelspi} \ ${_io} \ ${_ioat} \ ${_ipoib} \ ${_ipdivert} \ ${_ipfilter} \ ${_ipfw} \ ipfw_nat \ ${_ipfw_nat64} \ ${_ipfw_nptv6} \ ${_ipfw_pmod} \ ${_ipmi} \ ip6_mroute_mod \ ip_mroute_mod \ ${_ips} \ ${_ipsec} \ ${_ipw} \ ${_ipwfw} \ ${_isci} \ ${_iser} \ isp \ ${_ispfw} \ ${_iwi} \ ${_iwifw} \ ${_iwm} \ ${_iwmfw} \ ${_iwn} \ ${_iwnfw} \ ${_ix} \ ${_ixv} \ ${_ixgb} \ ${_ixl} \ ${_ixlv} \ jme \ joy \ kbdmux \ kgssapi \ kgssapi_krb5 \ khelp \ krpc \ ksyms \ le \ lge \ libalias \ libiconv \ libmchain \ ${_linprocfs} \ ${_linsysfs} \ ${_linux} \ ${_linux_common} \ ${_linux64} \ linuxkpi \ ${_lio} \ lmc \ lpt \ mac_biba \ mac_bsdextended \ mac_ifoff \ mac_lomac \ mac_mls \ mac_none \ mac_partition \ mac_portacl \ mac_seeotheruids \ mac_stub \ mac_test \ malo \ md \ mdio \ mem \ mfi \ mii \ mlx \ ${_mlx4} \ ${_mlx4ib} \ ${_mlx4en} \ ${_mlx5} \ ${_mlx5en} \ ${_mlx5ib} \ ${_mly} \ mmc \ mmcsd \ mpr \ mps \ mpt \ mqueue \ mrsas \ msdosfs \ msdosfs_iconv \ ${_mse} \ msk \ ${_mthca} \ mvs \ mwl \ ${_mwlfw} \ mxge \ my \ ${_nandfs} \ ${_nandsim} \ ${_ncr} \ ${_nctgpio} \ ${_ncv} \ ${_ndis} \ ${_netgraph} \ ${_nfe} \ nfscl \ nfscommon \ nfsd \ nfslock \ nfslockd \ nfssvc \ nge \ nmdm \ ${_nsp} \ nullfs \ ${_ntb} \ ${_nvd} \ ${_nvme} \ ${_nvram} \ ${_nxge} \ oce \ otus \ ${_otusfw} \ ow \ ${_padlock} \ ${_padlock_rng} \ ${_pccard} \ ${_pcfclock} \ pcn \ ${_pf} \ ${_pflog} \ ${_pfsync} \ plip \ ${_pms} \ ppbus \ ppc 
\ ppi \ pps \ procfs \ proto \ pseudofs \ ${_pst} \ pty \ puc \ ${_qlxge} \ ${_qlxgb} \ ${_qlxgbe} \ ${_qlnx} \ ral \ ${_ralfw} \ ${_random_fortuna} \ ${_random_yarrow} \ ${_random_other} \ rc4 \ ${_rdma} \ ${_rdrand_rng} \ re \ rl \ rtwn \ rtwn_pci \ rtwn_usb \ ${_rtwnfw} \ ${_s3} \ ${_safe} \ ${_sbni} \ scc \ ${_scsi_low} \ sdhci \ ${_sdhci_acpi} \ sdhci_pci \ sem \ send \ ${_sf} \ ${_sfxge} \ sge \ ${_sgx} \ ${_sgx_linux} \ siba_bwn \ siftr \ siis \ sis \ sk \ smbfs \ sn \ snp \ sound \ ${_speaker} \ spigen \ ${_splash} \ ${_sppp} \ ste \ ${_stg} \ stge \ ${_sym} \ ${_syscons} \ sysvipc \ tcp \ ${_ti} \ tl \ tmpfs \ ${_toecore} \ ${_tpm} \ trm \ ${_twa} \ twe \ tws \ tx \ ${_txp} \ uart \ ubsec \ udf \ udf_iconv \ ufs \ uinput \ unionfs \ usb \ ${_vesa} \ ${_virtio} \ vge \ ${_viawd} \ videomode \ vkbd \ ${_vmm} \ ${_vmware} \ ${_vpo} \ vr \ vte \ vx \ ${_vxge} \ wb \ ${_wbwd} \ ${_wi} \ wlan \ wlan_acl \ wlan_amrr \ wlan_ccmp \ wlan_rssadapt \ wlan_tkip \ wlan_wep \ wlan_xauth \ ${_wpi} \ ${_wpifw} \ ${_x86bios} \ ${_xe} \ xl \ zlib .if ${MK_AUTOFS} != "no" || defined(ALL_MODULES) _autofs= autofs .endif .if ${MK_CDDL} != "no" || defined(ALL_MODULES) .if (${MACHINE_CPUARCH} != "arm" || ${MACHINE_ARCH:Marmv[67]*} != "") && \ ${MACHINE_CPUARCH} != "mips" && \ ${MACHINE_CPUARCH} != "sparc64" SUBDIR+= dtrace .endif SUBDIR+= opensolaris .endif .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) .if exists(${SRCTOP}/sys/opencrypto) _crypto= crypto _cryptodev= cryptodev _random_fortuna=random_fortuna _random_yarrow= random_yarrow _random_other= random_other .endif .endif .if ${MK_CUSE} != "no" || defined(ALL_MODULES) SUBDIR+= cuse .endif .if (${MK_INET_SUPPORT} != "no" || ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _carp= carp _toecore= toecore _if_enc= if_enc _if_gif= if_gif _if_gre= if_gre _ipfw_pmod= ipfw_pmod .if ${MK_IPSEC_SUPPORT} != "no" _ipsec= ipsec .endif .endif .if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _if_stf= if_stf .endif .if ${MK_INET_SUPPORT} != "no" || defined(ALL_MODULES) _if_me= if_me _ipdivert= ipdivert _ipfw= ipfw .if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES) _ipfw_nat64= ipfw_nat64 .endif .endif .if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES) _ipfw_nptv6= ipfw_nptv6 .endif .if ${MK_IPFILTER} != "no" || defined(ALL_MODULES) _ipfilter= ipfilter .endif .if ${MK_ISCSI} != "no" || defined(ALL_MODULES) SUBDIR+= cfiscsi SUBDIR+= iscsi SUBDIR+= iscsi_initiator .endif .if ${MK_NAND} != "no" || defined(ALL_MODULES) _nandfs= nandfs _nandsim= nandsim .endif .if ${MK_NETGRAPH} != "no" || defined(ALL_MODULES) _netgraph= netgraph .endif .if (${MK_PF} != "no" && (${MK_INET_SUPPORT} != "no" || \ ${MK_INET6_SUPPORT} != "no")) || defined(ALL_MODULES) _pf= pf _pflog= pflog .if ${MK_INET_SUPPORT} != "no" _pfsync= pfsync .endif .endif .if ${MK_SOURCELESS_UCODE} != "no" _bce= bce _fxp= fxp _ispfw= ispfw _sf= sf _ti= ti _txp= txp .if ${MACHINE_CPUARCH} != "mips" _mwlfw= mwlfw _otusfw= otusfw _ralfw= ralfw _rtwnfw= rtwnfw .endif .endif .if ${MK_SOURCELESS_UCODE} != "no" && ${MACHINE_CPUARCH} != "arm" && \ ${MACHINE_CPUARCH} != "mips" && \ ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \ ${MACHINE_CPUARCH} != "riscv" _cxgbe= cxgbe .endif .if ${MK_TESTS} != "no" || defined(ALL_MODULES) SUBDIR+= tests .endif .if ${MK_ZFS} != "no" || defined(ALL_MODULES) SUBDIR+= zfs .endif .if (${MACHINE_CPUARCH} == "mips" && ${MACHINE_ARCH:Mmips64} == "") _hwpmc_mips24k= hwpmc_mips24k _hwpmc_mips74k= hwpmc_mips74k .endif 
.if ${MACHINE_CPUARCH} != "aarch64" && ${MACHINE_CPUARCH} != "arm" && \ ${MACHINE_CPUARCH} != "mips" && ${MACHINE_CPUARCH} != "powerpc" && \ ${MACHINE_CPUARCH} != "riscv" _syscons= syscons _vpo= vpo .endif .if ${MACHINE_CPUARCH} != "mips" # no BUS_SPACE_UNSPECIFIED # No barrier instruction support (specific to this driver) _sym= sym # intr_disable() is a macro, causes problems .if ${MK_SOURCELESS_UCODE} != "no" _cxgb= cxgb .endif .endif .if ${MACHINE_CPUARCH} == "aarch64" _armv8crypto= armv8crypto _efirt= efirt _em= em .endif .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64" _agp= agp _an= an _aout= aout _bios= bios _bktr= bktr .if ${MK_SOURCELESS_UCODE} != "no" _bxe= bxe .endif _cardbus= cardbus _cbb= cbb _cpuctl= cpuctl _cpufreq= cpufreq _cs= cs _dpms= dpms _drm= drm _drm2= drm2 _ed= ed _em= em _ena= ena _ep= ep _et= et _exca= exca _fe= fe .if ${MK_OFED} != "no" || defined(ALL_MODULES) _ibcore= ibcore .endif _if_ndis= if_ndis _io= io .if ${MK_OFED} != "no" || defined(ALL_MODULES) _ipoib= ipoib _iser= iser .endif _ix= ix _ixv= ixv _linprocfs= linprocfs _linsysfs= linsysfs _linux= linux +.if ${MK_SOURCELESS_UCODE} != "no" +_lio= lio +.endif _nctgpio= nctgpio _ndis= ndis _pccard= pccard .if ${MK_OFED} != "no" || defined(ALL_MODULES) _rdma= rdma .endif _safe= safe _scsi_low= scsi_low _speaker= speaker _splash= splash _sppp= sppp _vmware= vmware _vxge= vxge _wbwd= wbwd _wi= wi _xe= xe _aac= aac _aacraid= aacraid _acpi= acpi .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _aesni= aesni .endif _amd_ecc_inject=amd_ecc_inject _amdsbwd= amdsbwd _amdsmn= amdsmn _amdtemp= amdtemp _arcmsr= arcmsr _asmc= asmc _bytgpio= bytgpio _ciss= ciss _chromebook_platform= chromebook_platform _cmx= cmx _coretemp= coretemp .if ${MK_SOURCELESS_HOST} != "no" _hpt27xx= hpt27xx .endif _hptiop= hptiop .if ${MK_SOURCELESS_HOST} != "no" _hptmv= hptmv _hptnr= hptnr _hptrr= hptrr .endif _hyperv= hyperv _ichwd= ichwd _ida= ida _iir= iir _intelspi= intelspi _ipmi= ipmi _ips= ips _isci= isci _ipw= ipw _iwi= iwi _iwm= iwm _iwn= iwn _ixgb= ixgb .if ${MK_SOURCELESS_UCODE} != "no" _ipwfw= ipwfw _iwifw= iwifw _iwmfw= iwmfw _iwnfw= iwnfw .endif _mlx4= mlx4 _mlx5= mlx5 .if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _mlx4en= mlx4en _mlx5en= mlx5en .endif .if ${MK_OFED} != "no" || defined(ALL_MODULES) _mlx4ib= mlx4ib _mlx5ib= mlx5ib .endif _mly= mly .if ${MK_OFED} != "no" || defined(ALL_MODULES) _mthca= mthca .endif _nfe= nfe _nvd= nvd _nvme= nvme _nvram= nvram _nxge= nxge .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _padlock= padlock _padlock_rng= padlock_rng _rdrand_rng= rdrand_rng .endif _s3= s3 _sdhci_acpi= sdhci_acpi _tpm= tpm _twa= twa _vesa= vesa _viawd= viawd _virtio= virtio _wpi= wpi .if ${MK_SOURCELESS_UCODE} != "no" _wpifw= wpifw .endif _x86bios= x86bios .endif .if ${MACHINE_CPUARCH} == "amd64" _efirt= efirt _ioat= ioat _ixl= ixl _ixlv= ixlv _linux64= linux64 _linux_common= linux_common -.if ${MK_SOURCELESS_UCODE} != "no" -_lio= lio -.endif _ntb= ntb _pms= pms _qlxge= qlxge _qlxgb= qlxgb .if ${MK_SOURCELESS_UCODE} != "no" _qlxgbe= qlxgbe _qlnx= qlnx .endif _sfxge= sfxge _sgx= sgx _sgx_linux= sgx_linux .if ${MK_BHYVE} != "no" || defined(ALL_MODULES) _vmm= vmm .endif .endif .if ${MACHINE_CPUARCH} == "i386" # XXX some of these can move to the general case when de-i386'ed # XXX some of these can move now, but are untested on other architectures. 
_3dfx= 3dfx _3dfx_linux= 3dfx_linux _aic= aic _apm= apm _arcnet= arcnet .if ${MK_SOURCELESS_UCODE} != "no" _ce= ce .endif _coff= coff .if ${MK_SOURCELESS_UCODE} != "no" _cp= cp .endif _elink= elink _glxiic= glxiic _glxsb= glxsb #_ibcs2= ibcs2 _mse= mse _ncr= ncr _ncv= ncv _nsp= nsp _pcfclock= pcfclock _pst= pst _sbni= sbni _stg= stg _cm= cm .if ${MK_SOURCELESS_UCODE} != "no" _ctau= ctau .endif _dpt= dpt _ex= ex .endif .if ${MACHINE_CPUARCH} == "arm" _cfi= cfi _cpsw= cpsw .endif .if ${MACHINE_CPUARCH} == "powerpc" _agp= agp _an= an _bm= bm _cardbus= cardbus _cbb= cbb _cfi= cfi _cpufreq= cpufreq _drm= drm _exca= exca _ffec= ffec _pccard= pccard _wi= wi .endif .if ${MACHINE_ARCH} == "powerpc64" _drm2= drm2 .endif .if ${MACHINE_ARCH} == "powerpc64" || ${MACHINE_ARCH} == "powerpc" # Don't build powermac_nvram for powerpcspe, it's never supported. _nvram= powermac_nvram .endif .if ${MACHINE_CPUARCH} == "sparc64" _auxio= auxio _em= em _epic= epic .endif .if (${MACHINE_CPUARCH} == "amd64" || ${MACHINE_ARCH:Marmv[67]*} != "" || \ ${MACHINE_CPUARCH} == "i386") _cloudabi32= cloudabi32 .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" _cloudabi64= cloudabi64 .endif .endif .if ${MACHINE_ARCH:Marmv[67]*} != "" _ffec= ffec .endif SUBDIR+=${MODULES_EXTRA} .for reject in ${WITHOUT_MODULES} SUBDIR:= ${SUBDIR:N${reject}} .endfor # Calling kldxref(8) for each module is expensive. .if !defined(NO_XREF) .MAKEFLAGS+= -DNO_XREF afterinstall: .PHONY @if type kldxref >/dev/null 2>&1; then \ ${ECHO} kldxref ${DESTDIR}${KMODDIR}; \ kldxref ${DESTDIR}${KMODDIR}; \ fi .endif .include "${SYSDIR}/conf/config.mk" SUBDIR:= ${SUBDIR:u:O} .include