diff --git a/sys/dev/liquidio/lio_core.c b/sys/dev/liquidio/lio_core.c index dcbe4e44988d..ea6949f0354d 100644 --- a/sys/dev/liquidio/lio_core.c +++ b/sys/dev/liquidio/lio_core.c @@ -1,687 +1,687 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_ctrl.h" #include "lio_main.h" #include "lio_rxtx.h" #include "lio_network.h" int -lio_set_feature(struct ifnet *ifp, int cmd, uint16_t param1) +lio_set_feature(if_t ifp, int cmd, uint16_t param1) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = cmd; nctrl.ncmd.s.param1 = param1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Feature change failed in core (ret: 0x%x)\n", ret); } return (ret); } void lio_ctrl_cmd_completion(void *nctrl_ptr) { struct lio_ctrl_pkt *nctrl = (struct lio_ctrl_pkt *)nctrl_ptr; struct lio *lio; struct octeon_device *oct; uint8_t *mac; lio = nctrl->lio; if (lio->oct_dev == NULL) return; oct = lio->oct_dev; switch (nctrl->ncmd.s.cmd) { case LIO_CMD_CHANGE_DEVFLAGS: case LIO_CMD_SET_MULTI_LIST: break; case LIO_CMD_CHANGE_MACADDR: mac = ((uint8_t *)&nctrl->udd[0]) + 2; if (nctrl->ncmd.s.param1) { /* vfidx is 0 based, but vf_num (param1) is 1 based */ int vfidx = nctrl->ncmd.s.param1 - 1; bool mac_is_admin_assigned = nctrl->ncmd.s.param2; if (mac_is_admin_assigned) lio_dev_info(oct, "MAC Address %pM is configured for VF %d\n", mac, vfidx); } else { lio_dev_info(oct, "MAC Address changed to %02x:%02x:%02x:%02x:%02x:%02x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); } break; case LIO_CMD_GPIO_ACCESS: lio_dev_info(oct, "LED Flashing visual identification\n"); 
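[Editor's aside] The LIO_CMD_CHANGE_MACADDR case above recovers the MAC from offset 2 of the first 64-bit udd word. A minimal standalone model of that byte layout follows; the struct-free form and the sample address are illustrative, not the driver's API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
        uint64_t udd[2] = { 0, 0 };
        const uint8_t mac_in[6] = { 0x00, 0x0f, 0xb7, 0x11, 0x22, 0x33 };
        uint8_t *mac;

        /* The 6 MAC bytes occupy the upper 6 bytes of udd[0]. */
        memcpy((uint8_t *)&udd[0] + 2, mac_in, sizeof(mac_in));

        /* The completion handler undoes the same pointer arithmetic. */
        mac = (uint8_t *)&udd[0] + 2;
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
            mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return (0);
}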
break; case LIO_CMD_ID_ACTIVE: lio_dev_info(oct, "LED Flashing visual identification\n"); break; case LIO_CMD_LRO_ENABLE: lio_dev_info(oct, "HW LRO Enabled\n"); break; case LIO_CMD_LRO_DISABLE: lio_dev_info(oct, "HW LRO Disabled\n"); break; case LIO_CMD_VERBOSE_ENABLE: lio_dev_info(oct, "Firmware debug enabled\n"); break; case LIO_CMD_VERBOSE_DISABLE: lio_dev_info(oct, "Firmware debug disabled\n"); break; case LIO_CMD_VLAN_FILTER_CTL: if (nctrl->ncmd.s.param1) lio_dev_info(oct, "VLAN filter enabled\n"); else lio_dev_info(oct, "VLAN filter disabled\n"); break; case LIO_CMD_ADD_VLAN_FILTER: lio_dev_info(oct, "VLAN filter %d added\n", nctrl->ncmd.s.param1); break; case LIO_CMD_DEL_VLAN_FILTER: lio_dev_info(oct, "VLAN filter %d removed\n", nctrl->ncmd.s.param1); break; case LIO_CMD_SET_SETTINGS: lio_dev_info(oct, "Settings changed\n"); break; /* * Case to handle "LIO_CMD_TNL_RX_CSUM_CTL" * Command passed by NIC driver */ case LIO_CMD_TNL_RX_CSUM_CTL: if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_ENABLE) { lio_dev_info(oct, "RX Checksum Offload Enabled\n"); } else if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_DISABLE) { lio_dev_info(oct, "RX Checksum Offload Disabled\n"); } break; /* * Case to handle "LIO_CMD_TNL_TX_CSUM_CTL" * Command passed by NIC driver */ case LIO_CMD_TNL_TX_CSUM_CTL: if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_ENABLE) { lio_dev_info(oct, "TX Checksum Offload Enabled\n"); } else if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_DISABLE) { lio_dev_info(oct, "TX Checksum Offload Disabled\n"); } break; /* * Case to handle "LIO_CMD_VXLAN_PORT_CONFIG" * Command passed by NIC driver */ case LIO_CMD_VXLAN_PORT_CONFIG: if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_ADD) { lio_dev_info(oct, "VxLAN Destination UDP PORT:%d ADDED\n", nctrl->ncmd.s.param1); } else if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_DEL) { lio_dev_info(oct, "VxLAN Destination UDP PORT:%d DELETED\n", nctrl->ncmd.s.param1); } break; case LIO_CMD_SET_FLOW_CTL: lio_dev_info(oct, "Set RX/TX flow control parameters\n"); break; case LIO_CMD_SET_FNV: if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_ENABLE) lio_dev_info(oct, "FNV Enabled\n"); else if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_DISABLE) lio_dev_info(oct, "FNV Disabled\n"); break; case LIO_CMD_PKT_STEERING_CTL: if (nctrl->ncmd.s.param1 == LIO_CMD_PKT_STEERING_ENABLE) { lio_dev_info(oct, "Packet Steering Enabled\n"); } else if (nctrl->ncmd.s.param1 == LIO_CMD_PKT_STEERING_DISABLE) { lio_dev_info(oct, "Packet Steering Disabled\n"); } break; case LIO_CMD_QUEUE_COUNT_CTL: lio_dev_info(oct, "Queue count updated to %d\n", nctrl->ncmd.s.param1); break; default: lio_dev_err(oct, "%s Unknown cmd %d\n", __func__, nctrl->ncmd.s.cmd); } } /* * \brief Setup output queue * @param oct octeon device * @param q_no which queue * @param num_descs how many descriptors * @param desc_size size of each descriptor * @param app_ctx application context */ static int lio_setup_droq(struct octeon_device *oct, int q_no, int num_descs, int desc_size, void *app_ctx) { int ret_val = 0; lio_dev_dbg(oct, "Creating Droq: %d\n", q_no); /* droq creation and local register settings. */ ret_val = lio_create_droq(oct, q_no, num_descs, desc_size, app_ctx); if (ret_val < 0) return (ret_val); if (ret_val == 1) { lio_dev_dbg(oct, "Using default droq %d\n", q_no); return (0); } /* * Send Credit for Octeon Output queues. Credits are always * sent after the output queue is enabled. 
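[Editor's aside] The credit write that follows is how the host tells the hardware how many receive buffers the new DROQ has. A toy model of that accounting, assuming write-to-add semantics for the credit CSR (struct and names invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct toy_droq {
        uint32_t max_count;     /* ring size */
        uint32_t hw_credits;    /* models the pkts_credit CSR */
};

/* Models lio_write_csr32(oct, pkts_credit_reg, n). */
static void
toy_post_credits(struct toy_droq *dq, uint32_t n)
{
        dq->hw_credits += n;
}

int
main(void)
{
        struct toy_droq dq = { .max_count = 128, .hw_credits = 0 };

        toy_post_credits(&dq, dq.max_count);    /* after queue enable */
        printf("credits posted: %u\n", dq.hw_credits);
        return (0);
}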
*/ lio_write_csr32(oct, oct->droq[q_no]->pkts_credit_reg, oct->droq[q_no]->max_count); return (ret_val); } static void lio_push_packet(void *m_buff, uint32_t len, union octeon_rh *rh, void *rxq, void *arg) { struct mbuf *mbuf = m_buff; - struct ifnet *ifp = arg; + if_t ifp = arg; struct lio_droq *droq = rxq; if (ifp != NULL) { struct lio *lio = if_getsoftc(ifp); /* Do not proceed if the interface is not in RUNNING state. */ if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) { lio_recv_buffer_free(mbuf); droq->stats.rx_dropped++; return; } if (rh->r_dh.has_hash) { uint32_t hashtype, hashval; if (rh->r_dh.has_hwtstamp) { hashval = htobe32(*(uint32_t *) (((uint8_t *)mbuf->m_data) + ((rh->r_dh.len - 2) * BYTES_PER_DHLEN_UNIT))); hashtype = htobe32(*(((uint32_t *) (((uint8_t *)mbuf->m_data) + ((rh->r_dh.len - 2) * BYTES_PER_DHLEN_UNIT))) + 1)); } else { hashval = htobe32(*(uint32_t *) (((uint8_t *)mbuf->m_data) + ((rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT))); hashtype = htobe32(*(((uint32_t *) (((uint8_t *)mbuf->m_data) + ((rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT))) + 1)); } mbuf->m_pkthdr.flowid = hashval; switch (hashtype) { case LIO_RSS_HASH_IPV4: M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4); break; case LIO_RSS_HASH_TCP_IPV4: M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4); break; case LIO_RSS_HASH_IPV6: M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6); break; case LIO_RSS_HASH_TCP_IPV6: M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6); break; case LIO_RSS_HASH_IPV6_EX: M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6_EX); break; case LIO_RSS_HASH_TCP_IPV6_EX: M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6_EX); break; default: M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); } } else { /* * This case won't hit as FW will always set has_hash * in rh. */ M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); mbuf->m_pkthdr.flowid = droq->q_no; } m_adj(mbuf, rh->r_dh.len * 8); len -= rh->r_dh.len * 8; mbuf->m_flags |= M_PKTHDR; if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) && (rh->r_dh.priority || rh->r_dh.vlan)) { uint16_t priority = rh->r_dh.priority; uint16_t vid = rh->r_dh.vlan; uint16_t vtag; vtag = priority << 13 | vid; mbuf->m_pkthdr.ether_vtag = vtag; mbuf->m_flags |= M_VLANTAG; } if (rh->r_dh.csum_verified & LIO_IPSUM_VERIFIED) mbuf->m_pkthdr.csum_flags |= (CSUM_L3_CALC | CSUM_L3_VALID); if (rh->r_dh.csum_verified & LIO_L4SUM_VERIFIED) { mbuf->m_pkthdr.csum_flags |= (CSUM_L4_CALC | CSUM_L4_VALID); mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mbuf->m_pkthdr.csum_data = htons(0xffff); } mbuf->m_pkthdr.rcvif = ifp; mbuf->m_pkthdr.len = len; if ((lio_hwlro == 0) && (if_getcapenable(ifp) & IFCAP_LRO) && (mbuf->m_pkthdr.csum_flags & (CSUM_L3_VALID | CSUM_L4_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_L3_VALID | CSUM_L4_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { if (droq->lro.lro_cnt) { if (tcp_lro_rx(&droq->lro, mbuf, 0) == 0) { droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; return; } } } if_input(ifp, mbuf); droq->stats.rx_bytes_received += len; droq->stats.rx_pkts_received++; } else { lio_recv_buffer_free(mbuf); droq->stats.rx_dropped++; } } /* * \brief Setup input and output queues * @param octeon_dev octeon device * @param ifidx Interface Index * * Note: Queues are with respect to the octeon device. Thus * an input queue is for egress packets, and output queues * are for ingress packets. 
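[Editor's aside] One detail of lio_push_packet() above worth calling out: the 802.1Q tag is rebuilt from the response header's separate priority and VLAN-ID fields, with the 3-bit priority in bits 15:13 and the 12-bit VID in bits 11:0 (the DEI bit is not carried). A standalone check of that packing, with arbitrary sample values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint16_t priority = 5, vid = 100;
        uint16_t vtag = priority << 13 | vid;

        printf("vtag=0x%04x prio=%u vid=%u\n", (unsigned)vtag,
            (unsigned)(vtag >> 13), (unsigned)(vtag & 0x0fff));
        return (0);
}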
*/ int lio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx, uint32_t num_iqs, uint32_t num_oqs) { struct lio_droq_ops droq_ops; - struct ifnet *ifp; + if_t ifp; struct lio_droq *droq; struct lio *lio; static int cpu_id, cpu_id_modulus; int num_tx_descs, q, q_no, retval = 0; ifp = octeon_dev->props.ifp; lio = if_getsoftc(ifp); bzero(&droq_ops, sizeof(struct lio_droq_ops)); droq_ops.fptr = lio_push_packet; droq_ops.farg = (void *)ifp; cpu_id = 0; cpu_id_modulus = mp_ncpus; /* set up DROQs. */ for (q = 0; q < num_oqs; q++) { q_no = lio->linfo.rxpciq[q].s.q_no; lio_dev_dbg(octeon_dev, "lio_setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n", q, q_no); retval = lio_setup_droq(octeon_dev, q_no, LIO_GET_NUM_RX_DESCS_NIC_IF_CFG( lio_get_conf(octeon_dev), lio->ifidx), LIO_GET_NUM_RX_BUF_SIZE_NIC_IF_CFG( lio_get_conf(octeon_dev), lio->ifidx), NULL); if (retval) { lio_dev_err(octeon_dev, "%s : Runtime DROQ(RxQ) creation failed.\n", __func__); return (1); } droq = octeon_dev->droq[q_no]; /* designate a CPU for this droq */ droq->cpu_id = cpu_id; cpu_id++; if (cpu_id >= cpu_id_modulus) cpu_id = 0; lio_register_droq_ops(octeon_dev, q_no, &droq_ops); } /* set up IQs. */ for (q = 0; q < num_iqs; q++) { num_tx_descs = LIO_GET_NUM_TX_DESCS_NIC_IF_CFG( lio_get_conf(octeon_dev), lio->ifidx); retval = lio_setup_iq(octeon_dev, ifidx, q, lio->linfo.txpciq[q], num_tx_descs); if (retval) { lio_dev_err(octeon_dev, " %s : Runtime IQ(TxQ) creation failed.\n", __func__); return (1); } } return (0); } /* * \brief Droq packet processor scheduler * @param oct octeon device */ static void lio_schedule_droq_pkt_handlers(struct octeon_device *oct) { struct lio_droq *droq; uint64_t oq_no; if (oct->int_status & LIO_DEV_INTR_PKT_DATA) { for (oq_no = 0; oq_no < LIO_MAX_OUTPUT_QUEUES(oct); oq_no++) { if (!(oct->io_qmask.oq & BIT_ULL(oq_no))) continue; droq = oct->droq[oq_no]; taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task); } } } static void lio_msix_intr_handler(void *vector) { struct lio_ioq_vector *ioq_vector = (struct lio_ioq_vector *)vector; struct octeon_device *oct = ioq_vector->oct_dev; struct lio_droq *droq = oct->droq[ioq_vector->droq_index]; uint64_t ret; ret = oct->fn_list.msix_interrupt_handler(ioq_vector); if ((ret & LIO_MSIX_PO_INT) || (ret & LIO_MSIX_PI_INT)) { struct lio_instr_queue *iq = oct->instr_queue[droq->q_no]; int reschedule, tx_done = 1; reschedule = lio_droq_process_packets(oct, droq, oct->rx_budget); if (atomic_load_acq_int(&iq->instr_pending)) tx_done = lio_flush_iq(oct, iq, oct->tx_budget); if ((oct->props.ifp != NULL) && (iq->br != NULL)) { if (mtx_trylock(&iq->enq_lock)) { if (!drbr_empty(oct->props.ifp, iq->br)) lio_mq_start_locked(oct->props.ifp, iq); mtx_unlock(&iq->enq_lock); } } if (reschedule || !tx_done) taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task); else lio_enable_irq(droq, iq); } } static void lio_intr_handler(void *dev) { struct octeon_device *oct = (struct octeon_device *)dev; /* Disable our interrupts for the duration of ISR */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); oct->fn_list.process_interrupt_regs(oct); lio_schedule_droq_pkt_handlers(oct); /* Re-enable our interrupts */ if (!(atomic_load_acq_int(&oct->status) == LIO_DEV_IN_RESET)) oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); } int lio_setup_interrupt(struct octeon_device *oct, uint32_t num_ioqs) { device_t device; struct lio_ioq_vector *ioq_vector; int cpu_id, err, i; int num_alloc_ioq_vectors; int num_ioq_vectors; int res_id; if (!oct->msix_on) return (1); ioq_vector =
oct->ioq_vector; #ifdef RSS if (oct->sriov_info.num_pf_rings != rss_getnumbuckets()) { lio_dev_info(oct, "IOQ vectors (%d) are not equal number of RSS buckets (%d)\n", oct->sriov_info.num_pf_rings, rss_getnumbuckets()); } #endif device = oct->device; oct->num_msix_irqs = num_ioqs; /* one non ioq interrupt for handling sli_mac_pf_int_sum */ oct->num_msix_irqs += 1; num_alloc_ioq_vectors = oct->num_msix_irqs; if (pci_alloc_msix(device, &num_alloc_ioq_vectors) || (num_alloc_ioq_vectors != oct->num_msix_irqs)) goto err; num_ioq_vectors = oct->num_msix_irqs; /* For PF, there is one non-ioq interrupt handler */ for (i = 0; i < num_ioq_vectors - 1; i++, ioq_vector++) { res_id = i + 1; ioq_vector->msix_res = bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id, RF_SHAREABLE | RF_ACTIVE); if (ioq_vector->msix_res == NULL) { lio_dev_err(oct, "Unable to allocate bus res msix[%d]\n", i); goto err_1; } err = bus_setup_intr(device, ioq_vector->msix_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, lio_msix_intr_handler, ioq_vector, &ioq_vector->tag); if (err) { bus_release_resource(device, SYS_RES_IRQ, res_id, ioq_vector->msix_res); ioq_vector->msix_res = NULL; lio_dev_err(oct, "Failed to register intr handler"); goto err_1; } bus_describe_intr(device, ioq_vector->msix_res, ioq_vector->tag, "rxtx%u", i); ioq_vector->vector = res_id; #ifdef RSS cpu_id = rss_getcpu(i % rss_getnumbuckets()); #else cpu_id = i % mp_ncpus; #endif CPU_SETOF(cpu_id, &ioq_vector->affinity_mask); /* Setting the IRQ affinity. */ err = bus_bind_intr(device, ioq_vector->msix_res, cpu_id); if (err) lio_dev_err(oct, "bus bind interrupt fail"); #ifdef RSS lio_dev_dbg(oct, "Bound RSS bucket %d to CPU %d\n", i, cpu_id); #else lio_dev_dbg(oct, "Bound Queue %d to CPU %d\n", i, cpu_id); #endif } lio_dev_dbg(oct, "MSI-X enabled\n"); res_id = num_ioq_vectors; oct->msix_res = bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id, RF_SHAREABLE | RF_ACTIVE); if (oct->msix_res == NULL) { lio_dev_err(oct, "Unable to allocate bus res msix for non-ioq interrupt\n"); goto err_1; } err = bus_setup_intr(device, oct->msix_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, lio_intr_handler, oct, &oct->tag); if (err) { bus_release_resource(device, SYS_RES_IRQ, res_id, oct->msix_res); oct->msix_res = NULL; lio_dev_err(oct, "Failed to register intr handler"); goto err_1; } bus_describe_intr(device, oct->msix_res, oct->tag, "aux"); oct->aux_vector = res_id; return (0); err_1: if (oct->tag != NULL) { bus_teardown_intr(device, oct->msix_res, oct->tag); oct->tag = NULL; } while (i) { i--; ioq_vector--; if (ioq_vector->tag != NULL) { bus_teardown_intr(device, ioq_vector->msix_res, ioq_vector->tag); ioq_vector->tag = NULL; } if (ioq_vector->msix_res != NULL) { bus_release_resource(device, SYS_RES_IRQ, ioq_vector->vector, ioq_vector->msix_res); ioq_vector->msix_res = NULL; } } if (oct->msix_res != NULL) { bus_release_resource(device, SYS_RES_IRQ, oct->aux_vector, oct->msix_res); oct->msix_res = NULL; } err: pci_release_msi(device); lio_dev_err(oct, "MSI-X disabled\n"); return (1); } diff --git a/sys/dev/liquidio/lio_ioctl.c b/sys/dev/liquidio/lio_ioctl.c index f2cf0eae6f81..614c3cd1f868 100644 --- a/sys/dev/liquidio/lio_ioctl.c +++ b/sys/dev/liquidio/lio_ioctl.c @@ -1,548 +1,548 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_network.h" #include "lio_ctrl.h" #include "cn23xx_pf_device.h" #include "lio_image.h" #include "lio_ioctl.h" #include "lio_main.h" #include "lio_rxtx.h" -static int lio_set_rx_csum(struct ifnet *ifp, uint32_t data); -static int lio_set_tso4(struct ifnet *ifp); -static int lio_set_tso6(struct ifnet *ifp); -static int lio_set_lro(struct ifnet *ifp); -static int lio_change_mtu(struct ifnet *ifp, int new_mtu); -static int lio_set_mcast_list(struct ifnet *ifp); -static inline enum lio_ifflags lio_get_new_flags(struct ifnet *ifp); +static int lio_set_rx_csum(if_t ifp, uint32_t data); +static int lio_set_tso4(if_t ifp); +static int lio_set_tso6(if_t ifp); +static int lio_set_lro(if_t ifp); +static int lio_change_mtu(if_t ifp, int new_mtu); +static int lio_set_mcast_list(if_t ifp); +static inline enum lio_ifflags lio_get_new_flags(if_t ifp); static inline bool lio_is_valid_ether_addr(const uint8_t *addr) { return (!(0x01 & addr[0]) && !((addr[0] + addr[1] + addr[2] + addr[3] + addr[4] + addr[5]) == 0x00)); } static int -lio_change_dev_flags(struct ifnet *ifp) +lio_change_dev_flags(if_t ifp) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); /* Create a ctrl pkt command to be sent to core app. */ nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS; nctrl.ncmd.s.param1 = lio_get_new_flags(ifp); nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(oct, &nctrl); if (ret) lio_dev_err(oct, "Failed to change flags ret %d\n", ret); return (ret); } /* * lio_ioctl : User calls this routine for configuring * the interface. 
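[Editor's aside] lio_is_valid_ether_addr() above rejects multicast addresses (bit 0 of the first octet set) and the all-zeros address, the latter via a byte-sum check. The same predicate in standalone form, with invented sample addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
valid_ether_addr(const uint8_t *a)
{
        return (!(0x01 & a[0]) &&
            !((a[0] + a[1] + a[2] + a[3] + a[4] + a[5]) == 0x00));
}

int
main(void)
{
        const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        const uint8_t zero[6] = { 0 };
        const uint8_t good[6] = { 0x00, 0x0f, 0xb7, 0x01, 0x02, 0x03 };

        printf("%d %d %d\n", valid_ether_addr(mcast),
            valid_ether_addr(zero), valid_ether_addr(good));     /* 0 0 1 */
        return (0);
}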
* * return 0 on success, positive on failure */ int -lio_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +lio_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct lio *lio = if_getsoftc(ifp); struct ifreq *ifrequest = (struct ifreq *)data; int error = 0; switch (cmd) { case SIOCSIFADDR: lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFADDR\n"); if_setflagbits(ifp, IFF_UP, 0); error = ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFMTU\n"); error = lio_change_mtu(ifp, ifrequest->ifr_mtu); break; case SIOCSIFFLAGS: lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFFLAGS\n"); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ lio->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) error = lio_change_dev_flags(ifp); } else { if (!(atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_DETACH)) lio_open(lio); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) lio_stop(ifp); } lio->if_flags = if_getflags(ifp); break; case SIOCADDMULTI: lio_dev_dbg(lio->oct_dev, "ioctl: SIOCADDMULTI\n"); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) error = lio_set_mcast_list(ifp); break; case SIOCDELMULTI: lio_dev_dbg(lio->oct_dev, "ioctl: SIOCDELMULTI\n"); break; case SIOCSIFMEDIA: lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFMEDIA\n"); case SIOCGIFMEDIA: lio_dev_dbg(lio->oct_dev, "ioctl: SIOCGIFMEDIA\n"); case SIOCGIFXMEDIA: lio_dev_dbg(lio->oct_dev, "ioctl: SIOCGIFXMEDIA\n"); error = ifmedia_ioctl(ifp, ifrequest, &lio->ifmedia, cmd); break; case SIOCSIFCAP: { int features = ifrequest->ifr_reqcap ^ if_getcapenable(ifp); lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFCAP (Set Capabilities)\n"); if (!features) break; if (features & IFCAP_TXCSUM) { if_togglecapenable(ifp, IFCAP_TXCSUM); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP), 0); else if_sethwassistbits(ifp, 0, (CSUM_TCP | CSUM_UDP | CSUM_IP)); } if (features & IFCAP_TXCSUM_IPV6) { if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6), 0); else if_sethwassistbits(ifp, 0, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6)); } if (features & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) error |= lio_set_rx_csum(ifp, (features & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))); if (features & IFCAP_TSO4) error |= lio_set_tso4(ifp); if (features & IFCAP_TSO6) error |= lio_set_tso6(ifp); if (features & IFCAP_LRO) error |= lio_set_lro(ifp); if (features & IFCAP_VLAN_HWTAGGING) if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if (features & IFCAP_VLAN_HWFILTER) if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER); if (features & IFCAP_VLAN_HWTSO) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); VLAN_CAPABILITIES(ifp); break; } default: lio_dev_dbg(lio->oct_dev, "ioctl: UNKNOWN (0x%X)\n", (int)cmd); error = ether_ioctl(ifp, cmd, data); break; } return (error); } static int -lio_set_tso4(struct ifnet *ifp) +lio_set_tso4(if_t ifp) { struct lio *lio = if_getsoftc(ifp); if (if_getcapabilities(ifp) & IFCAP_TSO4) { if_togglecapenable(ifp, IFCAP_TSO4); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); else if_sethwassistbits(ifp, 0, CSUM_IP_TSO); } else { lio_dev_info(lio->oct_dev, "TSO4 capability not supported\n"); return (EINVAL); } return (0); } static int -lio_set_tso6(struct ifnet *ifp) +lio_set_tso6(if_t ifp) { struct lio *lio = if_getsoftc(ifp); if (if_getcapabilities(ifp) & IFCAP_TSO6) { if_togglecapenable(ifp, IFCAP_TSO6); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp,
CSUM_IP6_TSO, 0); else if_sethwassistbits(ifp, 0, CSUM_IP6_TSO); } else { lio_dev_info(lio->oct_dev, "TSO6 capability not supported\n"); return (EINVAL); } return (0); } static int -lio_set_rx_csum(struct ifnet *ifp, uint32_t data) +lio_set_rx_csum(if_t ifp, uint32_t data) { struct lio *lio = if_getsoftc(ifp); int ret = 0; if (if_getcapabilities(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { if_togglecapenable(ifp, (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)); if (data) { /* LRO requires RXCSUM */ if ((if_getcapabilities(ifp) & IFCAP_LRO) && (if_getcapenable(ifp) & IFCAP_LRO)) { ret = lio_set_feature(ifp, LIO_CMD_LRO_DISABLE, LIO_LROIPV4 | LIO_LROIPV6); if_togglecapenable(ifp, IFCAP_LRO); } } } else { lio_dev_info(lio->oct_dev, "Rx checksum offload capability not supported\n"); return (ENODEV); } return ((ret) ? EINVAL : 0); } static int -lio_set_lro(struct ifnet *ifp) +lio_set_lro(if_t ifp) { struct lio *lio = if_getsoftc(ifp); int ret = 0; if (!(if_getcapabilities(ifp) & IFCAP_LRO)) { lio_dev_info(lio->oct_dev, "LRO capability not supported\n"); return (ENODEV); } if ((!(if_getcapenable(ifp) & IFCAP_LRO)) && (if_getcapenable(ifp) & IFCAP_RXCSUM) && (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6)) { if_togglecapenable(ifp, IFCAP_LRO); if (lio_hwlro) ret = lio_set_feature(ifp, LIO_CMD_LRO_ENABLE, LIO_LROIPV4 | LIO_LROIPV6); } else if (if_getcapenable(ifp) & IFCAP_LRO) { if_togglecapenable(ifp, IFCAP_LRO); if (lio_hwlro) ret = lio_set_feature(ifp, LIO_CMD_LRO_DISABLE, LIO_LROIPV4 | LIO_LROIPV6); } else lio_dev_info(lio->oct_dev, "LRO requires RXCSUM\n"); return ((ret) ? EINVAL : 0); } static void lio_mtu_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf) { struct lio_soft_command *sc = buf; volatile int *mtu_sc_ctx; mtu_sc_ctx = sc->ctxptr; if (status) { lio_dev_err(oct, "MTU update ctl instruction failed. Status: %llx\n", LIO_CAST64(status)); *mtu_sc_ctx = -1; /* * This barrier is required to be sure that the * response has been written fully. */ wmb(); return; } *mtu_sc_ctx = 1; /* * This barrier is required to be sure that the response has been * written fully.
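[Editor's aside] lio_mtu_ctl_callback() above publishes its result through a volatile flag plus a write barrier. A portable C11 sketch of the equivalent release/acquire pairing; this is an analogy, not the kernel primitives the driver actually uses:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int mtu_ctx;     /* 0 = pending, 1 = ok, -1 = failed */

static void
completion_cb(int status)
{
        /* Release store: prior writes are visible to whoever sees the flag. */
        atomic_store_explicit(&mtu_ctx, status ? -1 : 1, memory_order_release);
}

int
main(void)
{
        completion_cb(0);
        printf("ctx=%d\n",
            atomic_load_explicit(&mtu_ctx, memory_order_acquire));
        return (0);
}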
*/ wmb(); } /* @param ifp is network device */ static int -lio_change_mtu(struct ifnet *ifp, int new_mtu) +lio_change_mtu(if_t ifp, int new_mtu) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_soft_command *sc; union octeon_cmd *ncmd; volatile int *mtu_sc_ctx; int retval = 0; if (lio->mtu == new_mtu) return (0); /* * Limit the MTU to make sure the ethernet packets are between * LIO_MIN_MTU_SIZE bytes and LIO_MAX_MTU_SIZE bytes */ if ((new_mtu < LIO_MIN_MTU_SIZE) || (new_mtu > LIO_MAX_MTU_SIZE)) { lio_dev_err(oct, "Invalid MTU: %d\n", new_mtu); lio_dev_err(oct, "Valid range: %d to %d\n", LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE); return (EINVAL); } sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, sizeof(*mtu_sc_ctx)); if (sc == NULL) return (ENOMEM); ncmd = (union octeon_cmd *)sc->virtdptr; mtu_sc_ctx = sc->ctxptr; *mtu_sc_ctx = 0; ncmd->cmd64 = 0; ncmd->s.cmd = LIO_CMD_CHANGE_MTU; ncmd->s.param1 = new_mtu; lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0, 0, 0); sc->callback = lio_mtu_ctl_callback; sc->callback_arg = sc; sc->wait_time = 5000; retval = lio_send_soft_command(oct, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_dev_info(oct, "Failed to send MTU update control message\n"); retval = EBUSY; goto mtu_update_failed; } /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed out. */ lio_sleep_cond(oct, mtu_sc_ctx); if (*mtu_sc_ctx < 0) { retval = EBUSY; goto mtu_update_failed; } lio_dev_info(oct, "MTU Changed from %d to %d\n", if_getmtu(ifp), new_mtu); if_setmtu(ifp, new_mtu); lio->mtu = new_mtu; retval = 0; /* * Setting retval to 0 here ensures that the LIO_IQ_SEND_STOP case * is also reported as success. */ mtu_update_failed: lio_free_soft_command(oct, sc); return (retval); } /* @param ifp network device */ int -lio_set_mac(struct ifnet *ifp, uint8_t *p) +lio_set_mac(if_t ifp, uint8_t *p) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; if (!lio_is_valid_ether_addr(p)) return (EADDRNOTAVAIL); bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_CHANGE_MACADDR; nctrl.ncmd.s.param1 = 0; nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; nctrl.wait_time = 100; nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. */ memcpy((uint8_t *)&nctrl.udd[0] + 2, p, ETHER_ADDR_LEN); ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "MAC Address change failed\n"); return (ENOMEM); } memcpy(((uint8_t *)&lio->linfo.hw_addr) + 2, p, ETHER_ADDR_LEN); return (0); } /* * \brief Converts a mask based on ifp flags * @param ifp network device * * This routine generates a lio_ifflags mask from the ifp flags * received from the OS.
*/ static inline enum lio_ifflags -lio_get_new_flags(struct ifnet *ifp) +lio_get_new_flags(if_t ifp) { enum lio_ifflags f = LIO_IFFLAG_UNICAST; if (if_getflags(ifp) & IFF_PROMISC) f |= LIO_IFFLAG_PROMISC; if (if_getflags(ifp) & IFF_ALLMULTI) f |= LIO_IFFLAG_ALLMULTI; if (if_getflags(ifp) & IFF_MULTICAST) { f |= LIO_IFFLAG_MULTICAST; /* * Accept all multicast addresses if there are more than we * can handle */ if (if_getamcount(ifp) > LIO_MAX_MULTICAST_ADDR) f |= LIO_IFFLAG_ALLMULTI; } if (if_getflags(ifp) & IFF_BROADCAST) f |= LIO_IFFLAG_BROADCAST; return (f); } static u_int lio_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint64_t *mc = arg; if (cnt == LIO_MAX_MULTICAST_ADDR) return (0); mc += cnt; *mc = 0; memcpy(((uint8_t *)mc) + 2, LLADDR(sdl), ETHER_ADDR_LEN); /* no need to swap bytes */ return (1); } /* @param ifp network device */ static int -lio_set_mcast_list(struct ifnet *ifp) +lio_set_mcast_list(if_t ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_ctrl_pkt nctrl; int mc_count; int ret; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); /* Create a ctrl pkt command to be sent to core app. */ nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_SET_MULTI_LIST; nctrl.ncmd.s.param1 = lio_get_new_flags(ifp); nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; /* copy all the addresses into the udd */ mc_count = if_foreach_llmaddr(ifp, lio_copy_maddr, &nctrl.udd[0]); /* * Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response. */ nctrl.wait_time = 0; nctrl.ncmd.s.param2 = mc_count; nctrl.ncmd.s.more = mc_count; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); } return ((ret) ? EINVAL : 0); } diff --git a/sys/dev/liquidio/lio_ioctl.h b/sys/dev/liquidio/lio_ioctl.h index d62948bafc7b..1c8db95e0c0b 100644 --- a/sys/dev/liquidio/lio_ioctl.h +++ b/sys/dev/liquidio/lio_ioctl.h @@ -1,44 +1,44 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _LIO_IOCTL_H_ #define _LIO_IOCTL_H_ -int lio_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); +int lio_ioctl(if_t ifp, u_long cmd, caddr_t data); void lio_add_hw_stats(struct lio *lio); -void lio_stop(struct ifnet *ifp); +void lio_stop(if_t ifp); void lio_open(void *arg); -int lio_set_mac(struct ifnet *ifp, uint8_t *p); +int lio_set_mac(if_t ifp, uint8_t *p); #endif /* _LIO_IOCTL_H_ */ diff --git a/sys/dev/liquidio/lio_main.c b/sys/dev/liquidio/lio_main.c index 4406b86f55d8..7f4fe01ffaca 100644 --- a/sys/dev/liquidio/lio_main.c +++ b/sys/dev/liquidio/lio_main.c @@ -1,2308 +1,2308 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_ctrl.h" #include "lio_main.h" #include "lio_network.h" #include "cn23xx_pf_device.h" #include "lio_image.h" #include "lio_ioctl.h" #include "lio_rxtx.h" #include "lio_rss.h" /* Number of milliseconds to wait for DDR initialization */ #define LIO_DDR_TIMEOUT 10000 #define LIO_MAX_FW_TYPE_LEN 8 static char fw_type[LIO_MAX_FW_TYPE_LEN]; TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type)); /* * Integers that specify number of queues per PF. * Valid range is 0 to 64. * Use 0 to derive from CPU count. 
*/ static int num_queues_per_pf0; static int num_queues_per_pf1; TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0); TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1); #ifdef RSS static int lio_rss = 1; TUNABLE_INT("hw.lio.rss", &lio_rss); #endif /* RSS */ /* Hardware LRO */ unsigned int lio_hwlro = 0; TUNABLE_INT("hw.lio.hwlro", &lio_hwlro); /* * Bitmask indicating which consoles have debug * output redirected to syslog. */ static unsigned long console_bitmask; TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask); /* * \brief determines if a given console has debug enabled. * @param console console to check * @returns 1 = enabled. 0 otherwise */ int lio_console_debug_enabled(uint32_t console) { return (console_bitmask >> (console)) & 0x1; } static int lio_detach(device_t dev); static int lio_device_init(struct octeon_device *octeon_dev); static int lio_chip_specific_setup(struct octeon_device *oct); static void lio_watchdog(void *param); static int lio_load_firmware(struct octeon_device *oct); static int lio_nic_starter(struct octeon_device *oct); static int lio_init_nic_module(struct octeon_device *oct); static int lio_setup_nic_devices(struct octeon_device *octeon_dev); static int lio_link_info(struct lio_recv_info *recv_info, void *ptr); static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf); -static int lio_set_rxcsum_command(struct ifnet *ifp, int command, +static int lio_set_rxcsum_command(if_t ifp, int command, uint8_t rx_cmd); static int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs); static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx); -static inline void lio_update_link_status(struct ifnet *ifp, +static inline void lio_update_link_status(if_t ifp, union octeon_link_status *ls); static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop); static int lio_stop_nic_module(struct octeon_device *oct); static void lio_destroy_resources(struct octeon_device *oct); -static int lio_setup_rx_oom_poll_fn(struct ifnet *ifp); +static int lio_setup_rx_oom_poll_fn(if_t ifp); -static void lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid); -static void lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, +static void lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid); +static void lio_vlan_rx_kill_vid(void *arg, if_t ifp, uint16_t vid); static struct octeon_device * lio_get_other_octeon_device(struct octeon_device *oct); static int lio_wait_for_oq_pkts(struct octeon_device *oct); int lio_send_rss_param(struct lio *lio); static int lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num, char *prefix, char *suffix); /* Polling interval for determining when NIC application is alive */ #define LIO_STARTER_POLL_INTERVAL_MS 100 /* * vendor_info_array. * This array contains the list of IDs on which the driver should load. 
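[Editor's aside] lio_console_debug_enabled() above is a plain bit probe on the tunable: setting hw.lio.console_bitmask=5, for instance, enables debug output for consoles 0 and 2. A standalone demonstration (mask value invented):

#include <stdio.h>

static int
console_enabled(unsigned long mask, unsigned console)
{
        return ((mask >> console) & 0x1);
}

int
main(void)
{
        unsigned long mask = 5;         /* 0b101 */

        printf("%d %d %d\n", console_enabled(mask, 0),
            console_enabled(mask, 1), console_enabled(mask, 2)); /* 1 0 1 */
        return (0);
}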
*/ struct lio_vendor_info { uint16_t vendor_id; uint16_t device_id; uint16_t subdevice_id; uint8_t revision_id; uint8_t index; }; static struct lio_vendor_info lio_pci_tbl[] = { /* CN2350 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE, 0x02, 0}, /* CN2350 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1, 0x02, 0}, /* CN2360 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE, 0x02, 1}, /* CN2350 25G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE, 0x02, 2}, /* CN2360 25G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE, 0x02, 3}, {0, 0, 0, 0, 0} }; static char *lio_strings[] = { "LiquidIO 2350 10GbE Server Adapter", "LiquidIO 2360 10GbE Server Adapter", "LiquidIO 2350 25GbE Server Adapter", "LiquidIO 2360 25GbE Server Adapter", }; struct lio_if_cfg_resp { uint64_t rh; struct octeon_if_cfg_info cfg_info; uint64_t status; }; struct lio_if_cfg_context { int octeon_id; volatile int cond; }; struct lio_rx_ctl_context { int octeon_id; volatile int cond; }; static int lio_probe(device_t dev) { struct lio_vendor_info *tbl; uint16_t vendor_id; uint16_t device_id; uint16_t subdevice_id; uint8_t revision_id; char device_ver[256]; vendor_id = pci_get_vendor(dev); if (vendor_id != PCI_VENDOR_ID_CAVIUM) return (ENXIO); device_id = pci_get_device(dev); subdevice_id = pci_get_subdevice(dev); revision_id = pci_get_revid(dev); tbl = lio_pci_tbl; while (tbl->vendor_id) { if ((vendor_id == tbl->vendor_id) && (device_id == tbl->device_id) && (subdevice_id == tbl->subdevice_id) && (revision_id == tbl->revision_id)) { sprintf(device_ver, "%s, Version - %s", lio_strings[tbl->index], LIO_VERSION); device_set_desc_copy(dev, device_ver); return (BUS_PROBE_DEFAULT); } tbl++; } return (ENXIO); } static int lio_attach(device_t device) { struct octeon_device *oct_dev = NULL; uint64_t scratch1; uint32_t error; int timeout, ret = 1; uint8_t bus, dev, function; oct_dev = lio_allocate_device(device); if (oct_dev == NULL) { device_printf(device, "Error: Unable to allocate device\n"); return (-ENOMEM); } oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET; oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET; oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; oct_dev->device = device; bus = pci_get_bus(device); dev = pci_get_slot(device); function = pci_get_function(device); lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n", pci_get_vendor(device), pci_get_device(device), bus, dev, function); if (lio_device_init(oct_dev)) { lio_dev_err(oct_dev, "Failed to init device\n"); lio_detach(device); return (-ENOMEM); } scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1); if (!(scratch1 & 4ULL)) { /* * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that * the lio watchdog kernel thread is running for this * NIC. Each NIC gets one watchdog kernel thread. */ scratch1 |= 4ULL; lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1); error = kproc_create(lio_watchdog, oct_dev, &oct_dev->watchdog_task, 0, 0, "liowd/%02hhx:%02hhx.%hhx", bus, dev, function); if (!error) { kproc_resume(oct_dev->watchdog_task); } else { oct_dev->watchdog_task = NULL; lio_dev_err(oct_dev, "failed to create kernel_thread\n"); lio_detach(device); return (-1); } } oct_dev->rx_pause = 1; oct_dev->tx_pause = 1; timeout = 0; while (timeout < LIO_NIC_STARTER_TIMEOUT) { lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS); timeout += LIO_STARTER_POLL_INTERVAL_MS; /* * During the boot process interrupts are not available. 
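[Editor's aside] The attach path above claims the per-NIC watchdog by test-and-setting bit 2 of SLI_SCRATCH_1, so only the first attach spawns the kernel thread. The same flag handling in miniature, with the register value simulated:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t scratch1 = 0;          /* simulated SLI_SCRATCH_1 */

        if (!(scratch1 & 4ULL)) {       /* bit 2 clear: no watchdog yet */
                scratch1 |= 4ULL;       /* claim it for this NIC */
                printf("watchdog claimed, scratch1=%#llx\n",
                    (unsigned long long)scratch1);
        }
        return (0);
}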
* So polling for first control message from FW. */ if (cold) lio_droq_bh(oct_dev->droq[0], 0); if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) { ret = lio_nic_starter(oct_dev); break; } } if (ret) { lio_dev_err(oct_dev, "Firmware failed to start\n"); lio_detach(device); return (-EIO); } lio_dev_dbg(oct_dev, "Device is ready\n"); return (0); } static int lio_detach(device_t dev) { struct octeon_device *oct_dev = device_get_softc(dev); lio_dev_dbg(oct_dev, "Stopping device\n"); if (oct_dev->watchdog_task) { uint64_t scratch1; kproc_suspend(oct_dev->watchdog_task, 0); scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1); scratch1 &= ~4ULL; lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1); } if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP)) lio_stop_nic_module(oct_dev); /* * Reset the octeon device and cleanup all memory allocated for * the octeon device by driver. */ lio_destroy_resources(oct_dev); lio_dev_info(oct_dev, "Device removed\n"); /* * This octeon device has been removed. Update the global * data structure to reflect this. Free the device structure. */ lio_free_device_mem(oct_dev); return (0); } static int lio_shutdown(device_t dev) { struct octeon_device *oct_dev = device_get_softc(dev); struct lio *lio = if_getsoftc(oct_dev->props.ifp); lio_send_rx_ctrl_cmd(lio, 0); return (0); } static int lio_suspend(device_t dev) { return (ENXIO); } static int lio_resume(device_t dev) { return (ENXIO); } static int lio_event(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: lio_init_device_list(LIO_CFG_TYPE_DEFAULT); break; default: break; } return (0); } /********************************************************************* * FreeBSD Device Interface Entry Points * *******************************************************************/ static device_method_t lio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lio_probe), DEVMETHOD(device_attach, lio_attach), DEVMETHOD(device_detach, lio_detach), DEVMETHOD(device_shutdown, lio_shutdown), DEVMETHOD(device_suspend, lio_suspend), DEVMETHOD(device_resume, lio_resume), DEVMETHOD_END }; static driver_t lio_driver = { LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device), }; DRIVER_MODULE(lio, pci, lio_driver, lio_event, NULL); MODULE_DEPEND(lio, pci, 1, 1, 1); MODULE_DEPEND(lio, ether, 1, 1, 1); MODULE_DEPEND(lio, firmware, 1, 1, 1); static bool fw_type_is_none(void) { return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, sizeof(LIO_FW_NAME_TYPE_NONE)) == 0; } /* * \brief Device initialization for each Octeon device that is probed * @param octeon_dev octeon device */ static int lio_device_init(struct octeon_device *octeon_dev) { unsigned long ddr_timeout = LIO_DDR_TIMEOUT; char *dbg_enb = NULL; int fw_loaded = 0; int i, j, ret; uint8_t bus, dev, function; char bootcmd[] = "\n"; bus = pci_get_bus(octeon_dev->device); dev = pci_get_slot(octeon_dev->device); function = pci_get_function(octeon_dev->device); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE); /* Enable access to the octeon device */ if (pci_enable_busmaster(octeon_dev->device)) { lio_dev_err(octeon_dev, "pci_enable_device failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE); /* Identify the Octeon type and map the BAR address space. 
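[Editor's aside] fw_type_is_none() above bounds the comparison with sizeof(LIO_FW_NAME_TYPE_NONE), which includes the terminating NUL, so only an exact "none" matches and prefixes like "none2" do not. A standalone check, with a stand-in token for the real macro:

#include <stdio.h>
#include <string.h>

#define FW_NAME_NONE "none"     /* stand-in for LIO_FW_NAME_TYPE_NONE */

int
main(void)
{
        const char *t[] = { "none", "nic", "none2" };

        for (int i = 0; i < 3; i++)
                printf("%s -> %d\n", t[i],
                    strncmp(t[i], FW_NAME_NONE, sizeof(FW_NAME_NONE)) == 0);
        return (0);
}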
*/ if (lio_chip_specific_setup(octeon_dev)) { lio_dev_err(octeon_dev, "Chip specific setup failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE); /* * Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', * since that is what is required for the reference to be removed * during de-initialization (see 'octeon_destroy_resources'). */ lio_register_device(octeon_dev, bus, dev, function, true); octeon_dev->app_mode = LIO_DRV_INVALID_APP; if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) { fw_loaded = 0; /* Do a soft reset of the Octeon device. */ if (octeon_dev->fn_list.soft_reset(octeon_dev)) return (1); /* things might have changed */ if (!lio_cn23xx_pf_fw_loaded(octeon_dev)) fw_loaded = 0; else fw_loaded = 1; } else { fw_loaded = 1; } /* * Initialize the dispatch mechanism used to push packets arriving on * Octeon Output queues. */ if (lio_init_dispatch_list(octeon_dev)) return (1); lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CORE_DRV_ACTIVE, lio_core_drv_init, octeon_dev); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE); ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Failed to configure device registers\n"); return (ret); } /* Initialize soft command buffer pool */ if (lio_setup_sc_buffer_pool(octeon_dev)) { lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_SC_BUFF_POOL_INIT_DONE); if (lio_allocate_ioq_vector(octeon_dev)) { lio_dev_err(octeon_dev, "IOQ vector allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_MSIX_ALLOC_VECTOR_DONE); for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) { octeon_dev->instr_queue[i] = malloc(sizeof(struct lio_instr_queue), M_DEVBUF, M_NOWAIT | M_ZERO); if (octeon_dev->instr_queue[i] == NULL) return (1); } /* Setup the data structures that manage this Octeon's Input queues. */ if (lio_setup_instr_queue0(octeon_dev)) { lio_dev_err(octeon_dev, "Instruction queue initialization failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INSTR_QUEUE_INIT_DONE); /* * Initialize lists to manage the requests of different types that * arrive from user & kernel applications for this octeon device. */ if (lio_setup_response_list(octeon_dev)) { lio_dev_err(octeon_dev, "Response list allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE); for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) { octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]), M_DEVBUF, M_NOWAIT | M_ZERO); if (octeon_dev->droq[i] == NULL) return (1); } if (lio_setup_output_queue0(octeon_dev)) { lio_dev_err(octeon_dev, "Output queue initialization failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE); /* * Setup the interrupt handler and record the INT SUM register address */ if (lio_setup_interrupt(octeon_dev, octeon_dev->sriov_info.num_pf_rings)) return (1); /* Enable Octeon device interrupts */ octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE); /* * Send Credit for Octeon Output queues. Credits are always sent BEFORE * the output queue is enabled. * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. 
* Otherwise, it is possible that the DRV_ACTIVE message will be sent * before any credits have been issued, causing the ring to be reset * (and the f/w appear to never have started). */ for (j = 0; j < octeon_dev->num_oqs; j++) lio_write_csr32(octeon_dev, octeon_dev->droq[j]->pkts_credit_reg, octeon_dev->droq[j]->max_count); /* Enable the input and output queues for this Octeon device */ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Failed to enable input/output queues"); return (ret); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE); if (!fw_loaded) { lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n"); if (!ddr_timeout) { lio_dev_info(octeon_dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); } lio_sleep_timeout(LIO_RESET_MSECS); /* * Wait for the octeon to initialize DDR after the * soft-reset. */ while (!ddr_timeout) { if (pause("-", lio_ms_to_ticks(100))) { /* user probably pressed Control-C */ return (1); } } ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout); if (ret) { lio_dev_err(octeon_dev, "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", ret); return (1); } if (lio_wait_for_bootloader(octeon_dev, 1100)) { lio_dev_err(octeon_dev, "Board not responding\n"); return (1); } /* Divert uboot to take commands from host instead. */ ret = lio_console_send_cmd(octeon_dev, bootcmd, 50); lio_dev_dbg(octeon_dev, "Initializing consoles\n"); ret = lio_init_consoles(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Could not access board consoles\n"); return (1); } /* * If console debug enabled, specify empty string to * use default enablement ELSE specify NULL string for * 'disabled'. */ dbg_enb = lio_console_debug_enabled(0) ? "" : NULL; ret = lio_add_console(octeon_dev, 0, dbg_enb); if (ret) { lio_dev_err(octeon_dev, "Could not access board console\n"); return (1); } else if (lio_console_debug_enabled(0)) { /* * If console was added AND we're logging console output * then set our console print function. */ octeon_dev->console[0].print = lio_dbg_console_print; } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_CONSOLE_INIT_DONE); lio_dev_dbg(octeon_dev, "Loading firmware\n"); ret = lio_load_firmware(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Could not load firmware to board\n"); return (1); } } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK); return (0); } /* * \brief PCI FLR for each Octeon device. 
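[Editor's aside] The DDR initialization wait above is a bounded poll. Its general shape, with an invented readiness predicate standing in for lio_wait_for_ddr_init():

#include <stdio.h>

static int
toy_ddr_ready(int tick)
{
        return (tick >= 3);     /* pretend DDR trains after 3 polls */
}

int
main(void)
{
        int tick, timeout = 10;

        for (tick = 0; tick < timeout; tick++)
                if (toy_ddr_ready(tick))
                        break;
        if (tick < timeout)
                printf("DDR ready after %d polls\n", tick);
        else
                printf("DDR timeout\n");
        return (0);
}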
* @param oct octeon device */ static void lio_pci_flr(struct octeon_device *oct) { uint32_t exppos, status; pci_find_cap(oct->device, PCIY_EXPRESS, &exppos); pci_save_state(oct->device); /* Quiesce the device completely */ pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2); /* Wait for Transaction Pending bit clean */ lio_mdelay(100); status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2); if (status & PCIEM_STA_TRANSACTION_PND) { lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n"); lio_mdelay(5); status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2); if (status & PCIEM_STA_TRANSACTION_PND) lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n"); } pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2); lio_mdelay(100); pci_restore_state(oct->device); } /* * \brief Debug console print function * @param octeon_dev octeon device * @param console_num console number * @param prefix first portion of line to display * @param suffix second portion of line to display * * The OCTEON debug console outputs entire lines (excluding '\n'). * Normally, the line will be passed in the 'prefix' parameter. * However, due to buffering, it is possible for a line to be split into two * parts, in which case they will be passed as the 'prefix' parameter and * 'suffix' parameter. */ static int lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num, char *prefix, char *suffix) { if (prefix != NULL && suffix != NULL) lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix); else if (prefix != NULL) lio_dev_info(oct, "%u: %s\n", console_num, prefix); else if (suffix != NULL) lio_dev_info(oct, "%u: %s\n", console_num, suffix); return (0); } static void lio_watchdog(void *param) { int core_num; uint16_t mask_of_crashed_or_stuck_cores = 0; struct octeon_device *oct = param; bool err_msg_was_printed[12]; bzero(err_msg_was_printed, sizeof(err_msg_was_printed)); while (1) { kproc_suspend_check(oct->watchdog_task); mask_of_crashed_or_stuck_cores = (uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2); if (mask_of_crashed_or_stuck_cores) { struct octeon_device *other_oct; oct->cores_crashed = true; other_oct = lio_get_other_octeon_device(oct); if (other_oct != NULL) other_oct->cores_crashed = true; for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) { bool core_crashed_or_got_stuck; core_crashed_or_got_stuck = (mask_of_crashed_or_stuck_cores >> core_num) & 1; if (core_crashed_or_got_stuck && !err_msg_was_printed[core_num]) { lio_dev_err(oct, "ERROR: Octeon core %d crashed or got stuck! 
See oct-fwdump for details.\n", core_num); err_msg_was_printed[core_num] = true; } } } /* sleep for two seconds */ pause("-", lio_ms_to_ticks(2000)); } } static int lio_chip_specific_setup(struct octeon_device *oct) { char *s; uint32_t dev_id; int ret = 1; dev_id = lio_read_pci_cfg(oct, 0); oct->subdevice_id = pci_get_subdevice(oct->device); switch (dev_id) { case LIO_CN23XX_PF_PCIID: oct->chip_id = LIO_CN23XX_PF_VID; if (pci_get_function(oct->device) == 0) { if (num_queues_per_pf0 < 0) { lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n", num_queues_per_pf0); num_queues_per_pf0 = 0; } oct->sriov_info.num_pf_rings = num_queues_per_pf0; } else { if (num_queues_per_pf1 < 0) { lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n", num_queues_per_pf1); num_queues_per_pf1 = 0; } oct->sriov_info.num_pf_rings = num_queues_per_pf1; } ret = lio_cn23xx_pf_setup_device(oct); s = "CN23XX"; break; default: s = "?"; lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id); } if (!ret) lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s, OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct), lio_get_conf(oct)->card_name, LIO_VERSION); return (ret); } static struct octeon_device * lio_get_other_octeon_device(struct octeon_device *oct) { struct octeon_device *other_oct; other_oct = lio_get_device(oct->octeon_id + 1); if ((other_oct != NULL) && other_oct->device) { int oct_busnum, other_oct_busnum; oct_busnum = pci_get_bus(oct->device); other_oct_busnum = pci_get_bus(other_oct->device); if (oct_busnum == other_oct_busnum) { int oct_slot, other_oct_slot; oct_slot = pci_get_slot(oct->device); other_oct_slot = pci_get_slot(other_oct->device); if (oct_slot == other_oct_slot) return (other_oct); } } return (NULL); } /* * \brief Load firmware to device * @param oct octeon device * * Maps device to firmware filename, requests firmware, and downloads it */ static int lio_load_firmware(struct octeon_device *oct) { const struct firmware *fw; char *tmp_fw_type = NULL; int ret = 0; char fw_name[LIO_MAX_FW_FILENAME_LEN]; if (fw_type[0] == '\0') tmp_fw_type = LIO_FW_NAME_TYPE_NIC; else tmp_fw_type = fw_type; sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME, lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX); fw = firmware_get(fw_name); if (fw == NULL) { lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n", fw_name); return (EINVAL); } ret = lio_download_firmware(oct, fw->data, fw->datasize); firmware_put(fw, FIRMWARE_UNLOAD); return (ret); } static int lio_nic_starter(struct octeon_device *oct) { int ret = 0; atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING); if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) { if (lio_init_nic_module(oct)) { lio_dev_err(oct, "NIC initialization failed\n"); ret = -1; #ifdef CAVIUM_ONiLY_23XX_VF } else { if (octeon_enable_sriov(oct) < 0) ret = -1; #endif } } else { lio_dev_err(oct, "Unexpected application running on NIC (%d). 
Check firmware.\n", oct->app_mode); ret = -1; } return (ret); } static int lio_init_nic_module(struct octeon_device *oct) { int num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct)); int retval = 0; lio_dev_dbg(oct, "Initializing network interfaces\n"); /* * only default iq and oq were initialized * initialize the rest as well */ /* run port_config command for each port */ oct->ifcount = num_nic_ports; bzero(&oct->props, sizeof(struct lio_if_props)); oct->props.gmxport = -1; retval = lio_setup_nic_devices(oct); if (retval) { lio_dev_err(oct, "Setup NIC devices failed\n"); goto lio_init_failure; } lio_dev_dbg(oct, "Network interfaces ready\n"); return (retval); lio_init_failure: oct->ifcount = 0; return (retval); } static int -lio_ifmedia_update(struct ifnet *ifp) +lio_ifmedia_update(if_t ifp) { struct lio *lio = if_getsoftc(ifp); struct ifmedia *ifm; ifm = &lio->ifmedia; /* We only support Ethernet media type. */ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; case IFM_10G_CX4: case IFM_10G_SR: case IFM_10G_T: case IFM_10G_TWINAX: default: /* We don't support changing the media type. */ lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n", IFM_SUBTYPE(ifm->ifm_media)); return (EINVAL); } return (0); } static int lio_get_media_subtype(struct octeon_device *oct) { switch(oct->subdevice_id) { case LIO_CN2350_10G_SUBDEVICE: case LIO_CN2350_10G_SUBDEVICE1: case LIO_CN2360_10G_SUBDEVICE: return (IFM_10G_SR); case LIO_CN2350_25G_SUBDEVICE: case LIO_CN2360_25G_SUBDEVICE: return (IFM_25G_SR); } return (IFM_10G_SR); } static uint64_t lio_get_baudrate(struct octeon_device *oct) { switch(oct->subdevice_id) { case LIO_CN2350_10G_SUBDEVICE: case LIO_CN2350_10G_SUBDEVICE1: case LIO_CN2360_10G_SUBDEVICE: return (IF_Gbps(10)); case LIO_CN2350_25G_SUBDEVICE: case LIO_CN2360_25G_SUBDEVICE: return (IF_Gbps(25)); } return (IF_Gbps(10)); } static void -lio_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) +lio_ifmedia_status(if_t ifp, struct ifmediareq *ifmr) { struct lio *lio = if_getsoftc(ifp); /* Report link down if the driver isn't running. */ if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) { ifmr->ifm_active |= IFM_NONE; return; } /* Setup the default interface info. 
*/ ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (lio->linfo.link.s.link_up) { ifmr->ifm_status |= IFM_ACTIVE; } else { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev); if (lio->linfo.link.s.duplex) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } static uint64_t lio_get_counter(if_t ifp, ift_counter cnt) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; uint64_t counter = 0; int i, q_no; switch (cnt) { case IFCOUNTER_IPACKETS: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_pkts_received; } break; case IFCOUNTER_OPACKETS: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_done; } break; case IFCOUNTER_IBYTES: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_bytes_received; } break; case IFCOUNTER_OBYTES: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_tot_bytes; } break; case IFCOUNTER_IQDROPS: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_dropped; } break; case IFCOUNTER_OQDROPS: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_dropped; } break; case IFCOUNTER_IMCASTS: counter = oct->link_stats.fromwire.total_mcst; break; case IFCOUNTER_OMCASTS: counter = oct->link_stats.fromhost.mcast_pkts_sent; break; case IFCOUNTER_COLLISIONS: counter = oct->link_stats.fromhost.total_collisions; break; case IFCOUNTER_IERRORS: counter = oct->link_stats.fromwire.fcs_err + oct->link_stats.fromwire.l2_err + oct->link_stats.fromwire.frame_err; break; default: return (if_get_counter_default(ifp, cnt)); } return (counter); } static int lio_init_ifnet(struct lio *lio) { struct octeon_device *oct = lio->oct_dev; - if_t ifp = lio->ifp; + if_t ifp = lio->ifp; /* ifconfig entrypoint for media type/status reporting */ ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update, lio_ifmedia_status); /* set the default interface values */ ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)), 0, NULL); ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO)); lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media; lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media); if_initname(ifp, device_get_name(oct->device), device_get_unit(oct->device)); if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); if_setioctlfn(ifp, lio_ioctl); if_setgetcounterfn(ifp, lio_get_counter); if_settransmitfn(ifp, lio_mq_start); if_setqflushfn(ifp, lio_qflush); if_setinitfn(ifp, lio_open); if_setmtu(ifp, lio->linfo.link.s.mtu); lio->mtu = lio->linfo.link.s.mtu; if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)); if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | IFCAP_LRO | IFCAP_JUMBO_MTU | IFCAP_HWSTATS | IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0); if_setcapenable(ifp, if_getcapabilities(ifp)); if_setbaudrate(ifp, lio_get_baudrate(oct)); return (0); } static void -lio_tcp_lro_free(struct octeon_device *octeon_dev, struct ifnet *ifp) +lio_tcp_lro_free(struct octeon_device *octeon_dev, if_t ifp) { 
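/*
 * Free the per-droq LRO context for every output queue owned by this
 * interface.  Clearing lro.ifp marks the queue as torn down, so a
 * repeat call (e.g. from the lio_tcp_lro_init() error-unwind path
 * below) is harmless.
 */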
struct lio *lio = if_getsoftc(ifp); struct lio_droq *droq; int q_no; int i; for (i = 0; i < octeon_dev->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; droq = octeon_dev->droq[q_no]; if (droq->lro.ifp) { tcp_lro_free(&droq->lro); droq->lro.ifp = NULL; } } } static int -lio_tcp_lro_init(struct octeon_device *octeon_dev, struct ifnet *ifp) +lio_tcp_lro_init(struct octeon_device *octeon_dev, if_t ifp) { struct lio *lio = if_getsoftc(ifp); struct lio_droq *droq; struct lro_ctrl *lro; int i, q_no, ret = 0; for (i = 0; i < octeon_dev->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; droq = octeon_dev->droq[q_no]; lro = &droq->lro; ret = tcp_lro_init(lro); if (ret) { lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n", ret); goto lro_init_failed; } lro->ifp = ifp; } return (ret); lro_init_failed: lio_tcp_lro_free(octeon_dev, ifp); return (ret); } static int lio_setup_nic_devices(struct octeon_device *octeon_dev) { union octeon_if_cfg if_cfg; struct lio *lio = NULL; - struct ifnet *ifp = NULL; + if_t ifp = NULL; struct lio_version *vdata; struct lio_soft_command *sc; struct lio_if_cfg_context *ctx; struct lio_if_cfg_resp *resp; struct lio_if_props *props; int num_iqueues, num_oqueues, retval; unsigned int base_queue; unsigned int gmx_port_id; uint32_t ctx_size, data_size; uint32_t ifidx_or_pfnum, resp_size; uint8_t mac[ETHER_HDR_LEN], i, j; /* This is to handle link status changes */ lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC, LIO_OPCODE_NIC_INFO, lio_link_info, octeon_dev); for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct lio_if_cfg_resp); ctx_size = sizeof(struct lio_if_cfg_context); data_size = sizeof(struct lio_version); sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size, ctx_size); if (sc == NULL) return (ENOMEM); resp = (struct lio_if_cfg_resp *)sc->virtrptr; ctx = (struct lio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((uint64_t *)vdata) = 0; vdata->major = htobe16(LIO_BASE_MAJOR_VERSION); vdata->minor = htobe16(LIO_BASE_MINOR_VERSION); vdata->micro = htobe16(LIO_BASE_MICRO_VERSION); num_iqueues = octeon_dev->sriov_info.num_pf_rings; num_oqueues = octeon_dev->sriov_info.num_pf_rings; base_queue = octeon_dev->sriov_info.pf_srn; gmx_port_id = octeon_dev->pf_num; ifidx_or_pfnum = octeon_dev->pf_num; lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n", ifidx_or_pfnum, num_iqueues, num_oqueues); ctx->cond = 0; ctx->octeon_id = lio_get_device_id(octeon_dev); if_cfg.if_cfg64 = 0; if_cfg.s.num_iqueues = num_iqueues; if_cfg.s.num_oqueues = num_oqueues; if_cfg.s.base_queue = base_queue; if_cfg.s.gmx_port_id = gmx_port_id; sc->iq_no = 0; lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_IF_CFG, 0, if_cfg.if_cfg64, 0); sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; sc->wait_time = 3000; retval = lio_send_soft_command(octeon_dev, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ goto setup_nic_dev_fail; } /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
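 * The completion side is lio_if_cfg_callback(), which records the
 * firmware response, sets ctx->cond and issues a wmb() so that the
 * flag is visible to this polling thread.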
*/ lio_sleep_cond(octeon_dev, &ctx->cond); retval = resp->status; if (retval) { lio_dev_err(octeon_dev, "iq/oq config failed\n"); goto setup_nic_dev_fail; } lio_swap_8B_data((uint64_t *)(&resp->cfg_info), (sizeof(struct octeon_if_cfg_info)) >> 3); num_iqueues = bitcount64(resp->cfg_info.iqmask); num_oqueues = bitcount64(resp->cfg_info.oqmask); if (!(num_iqueues) || !(num_oqueues)) { lio_dev_err(octeon_dev, "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n", LIO_CAST64(resp->cfg_info.iqmask), LIO_CAST64(resp->cfg_info.oqmask)); goto setup_nic_dev_fail; } lio_dev_dbg(octeon_dev, "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", i, LIO_CAST64(resp->cfg_info.iqmask), LIO_CAST64(resp->cfg_info.oqmask), num_iqueues, num_oqueues); ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { lio_dev_err(octeon_dev, "Device allocation failed\n"); goto setup_nic_dev_fail; } lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO); if (lio == NULL) { lio_dev_err(octeon_dev, "Lio allocation failed\n"); goto setup_nic_dev_fail; } if_setsoftc(ifp, lio); - ifp->if_hw_tsomax = LIO_MAX_FRAME_SIZE; - ifp->if_hw_tsomaxsegcount = LIO_MAX_SG; - ifp->if_hw_tsomaxsegsize = PAGE_SIZE; + if_sethwtsomax(ifp, LIO_MAX_FRAME_SIZE); + if_sethwtsomaxsegcount(ifp, LIO_MAX_SG); + if_sethwtsomaxsegsize(ifp, PAGE_SIZE); lio->ifidx = ifidx_or_pfnum; props = &octeon_dev->props; props->gmxport = resp->cfg_info.linfo.gmxport; props->ifp = ifp; lio->linfo.num_rxpciq = num_oqueues; lio->linfo.num_txpciq = num_iqueues; for (j = 0; j < num_oqueues; j++) { lio->linfo.rxpciq[j].rxpciq64 = resp->cfg_info.linfo.rxpciq[j].rxpciq64; } for (j = 0; j < num_iqueues; j++) { lio->linfo.txpciq[j].txpciq64 = resp->cfg_info.linfo.txpciq[j].txpciq64; } lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; lio->linfo.link.link_status64 = resp->cfg_info.linfo.link.link_status64; /* * Point to the properties for octeon device to which this * interface belongs. 
*/ lio->oct_dev = octeon_dev; lio->ifp = ifp; lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i, lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr)); lio_init_ifnet(lio); /* 64-bit swap required on LE machines */ lio_swap_8B_data(&lio->linfo.hw_addr, 1); for (j = 0; j < 6; j++) mac[j] = *((uint8_t *)( ((uint8_t *)&lio->linfo.hw_addr) + 2 + j)); ether_ifattach(ifp, mac); /* * By default all interfaces on a single Octeon uses the same * tx and rx queues */ lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { lio_dev_err(octeon_dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq); if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { lio_dev_err(octeon_dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; } if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp)) goto setup_nic_dev_fail; if (lio_hwlro && (if_getcapenable(ifp) & IFCAP_LRO) && (if_getcapenable(ifp) & IFCAP_RXCSUM) && (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6)) lio_set_feature(ifp, LIO_CMD_LRO_ENABLE, LIO_LROIPV4 | LIO_LROIPV6); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)) lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1); else lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0); if (lio_setup_rx_oom_poll_fn(ifp)) goto setup_nic_dev_fail; lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); lio->link_changes++; lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED); /* * Sending command to firmware to enable Rx checksum offload * by default at the time of setup of Liquidio driver for * this device */ lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL, LIO_CMD_RXCSUM_ENABLE); lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL, LIO_CMD_TXCSUM_ENABLE); #ifdef RSS if (lio_rss) { if (lio_send_rss_param(lio)) goto setup_nic_dev_fail; } else #endif /* RSS */ lio_set_feature(ifp, LIO_CMD_SET_FNV, LIO_CMD_FNV_ENABLE); lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i); lio_free_soft_command(octeon_dev, sc); lio->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, lio_vlan_rx_add_vid, lio, EVENTHANDLER_PRI_FIRST); lio->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, lio_vlan_rx_kill_vid, lio, EVENTHANDLER_PRI_FIRST); /* Update stats periodically */ callout_init(&lio->stats_timer, 0); lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL; lio_add_hw_stats(lio); } return (0); setup_nic_dev_fail: lio_free_soft_command(octeon_dev, sc); while (i--) { lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i); lio_destroy_nic_device(octeon_dev, i); } return (ENODEV); } static int lio_link_info(struct lio_recv_info *recv_info, void *ptr) { struct octeon_device *oct = (struct octeon_device *)ptr; struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt; union octeon_link_status *ls; int gmxport = 0, i; lio_dev_dbg(oct, "%s Called\n", __func__); if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) { lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n", recv_pkt->buffer_size[0], recv_pkt->rh.r_nic_info.gmxport); goto nic_info_err; } gmxport = recv_pkt->rh.r_nic_info.gmxport; ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data + LIO_DROQ_INFO_SIZE); lio_swap_8B_data((uint64_t *)ls, (sizeof(union octeon_link_status)) >> 3); if (oct->props.gmxport == 
gmxport) lio_update_link_status(oct->props.ifp, ls); nic_info_err: for (i = 0; i < recv_pkt->buffer_count; i++) lio_recv_buffer_free(recv_pkt->buffer_ptr[i]); lio_free_recv_info(recv_info); return (0); } void lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo) { bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(iq->txtag, finfo->map); m_freem(finfo->mb); } void lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo) { struct lio_gather *g; struct octeon_device *oct; struct lio *lio; int iq_no; g = finfo->g; iq_no = iq->txpciq.s.q_no; oct = iq->oct_dev; lio = if_getsoftc(oct->props.ifp); mtx_lock(&lio->glist_lock[iq_no]); STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries); mtx_unlock(&lio->glist_lock[iq_no]); bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(iq->txtag, finfo->map); m_freem(finfo->mb); } static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf) { struct lio_soft_command *sc = (struct lio_soft_command *)buf; struct lio_if_cfg_resp *resp; struct lio_if_cfg_context *ctx; resp = (struct lio_if_cfg_resp *)sc->virtrptr; ctx = (struct lio_if_cfg_context *)sc->ctxptr; oct = lio_get_device(ctx->octeon_id); if (resp->status) lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n", LIO_CAST64(resp->status), status); ctx->cond = 1; snprintf(oct->fw_info.lio_firmware_version, 32, "%s", resp->cfg_info.lio_firmware_version); /* * This barrier is required to be sure that the response has been * written fully before waking up the handler */ wmb(); } static int lio_is_mac_changed(uint8_t *new, uint8_t *old) { return ((new[0] != old[0]) || (new[1] != old[1]) || (new[2] != old[2]) || (new[3] != old[3]) || (new[4] != old[4]) || (new[5] != old[5])); } void lio_open(void *arg) { struct lio *lio = arg; - struct ifnet *ifp = lio->ifp; + if_t ifp = lio->ifp; struct octeon_device *oct = lio->oct_dev; uint8_t *mac_new, mac_old[ETHER_ADDR_LEN]; int ret = 0; lio_ifstate_set(lio, LIO_IFSTATE_RUNNING); /* Ready for link status updates */ lio->intf_open = 1; lio_dev_info(oct, "Interface Open, ready for traffic\n"); /* tell Octeon to start forwarding packets to host */ lio_send_rx_ctrl_cmd(lio, 1); - mac_new = IF_LLADDR(ifp); + mac_new = if_getlladdr(ifp); memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_ADDR_LEN); if (lio_is_mac_changed(mac_new, mac_old)) { ret = lio_set_mac(ifp, mac_new); if (ret) lio_dev_err(oct, "MAC change failed, error: %d\n", ret); } /* Now inform the stack we're ready */ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); lio_dev_info(oct, "Interface is opened\n"); } static int -lio_set_rxcsum_command(struct ifnet *ifp, int command, uint8_t rx_cmd) +lio_set_rxcsum_command(if_t ifp, int command, uint8_t rx_cmd) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core (ret: 0x%x)\n", ret); } return (ret); } static int lio_stop_nic_module(struct octeon_device *oct) { int i, j; struct lio *lio; lio_dev_dbg(oct, "Stopping network interfaces\n"); if (!oct->ifcount) { lio_dev_err(oct, "Init for Octeon was not completed\n"); return (1); }
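/*
 * Take the control path offline first: with cmd_resp_state set to
 * LIO_DRV_OFFLINE under cmd_resp_wqlock, no new control commands can
 * be issued while the per-queue DROQ ops are unregistered below.
 */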
mtx_lock(&oct->cmd_resp_wqlock); oct->cmd_resp_state = LIO_DRV_OFFLINE; mtx_unlock(&oct->cmd_resp_wqlock); for (i = 0; i < oct->ifcount; i++) { lio = if_getsoftc(oct->props.ifp); for (j = 0; j < oct->num_oqs; j++) lio_unregister_droq_ops(oct, lio->linfo.rxpciq[j].s.q_no); } callout_drain(&lio->stats_timer); for (i = 0; i < oct->ifcount; i++) lio_destroy_nic_device(oct, i); lio_dev_dbg(oct, "Network interface stopped\n"); return (0); } static void lio_delete_glists(struct octeon_device *oct, struct lio *lio) { struct lio_gather *g; int i; if (lio->glist_lock != NULL) { free((void *)lio->glist_lock, M_DEVBUF); lio->glist_lock = NULL; } if (lio->ghead == NULL) return; for (i = 0; i < lio->linfo.num_txpciq; i++) { do { g = (struct lio_gather *) lio_delete_first_node(&lio->ghead[i]); free(g, M_DEVBUF); } while (g); if ((lio->glists_virt_base != NULL) && (lio->glists_virt_base[i] != NULL)) { lio_dma_free(lio->glist_entry_size * lio->tx_qsize, lio->glists_virt_base[i]); } } free(lio->glists_virt_base, M_DEVBUF); lio->glists_virt_base = NULL; free(lio->glists_dma_base, M_DEVBUF); lio->glists_dma_base = NULL; free(lio->ghead, M_DEVBUF); lio->ghead = NULL; } static int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) { struct lio_gather *g; int i, j; lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF, M_NOWAIT | M_ZERO); if (lio->glist_lock == NULL) return (1); lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF, M_NOWAIT | M_ZERO); if (lio->ghead == NULL) { free((void *)lio->glist_lock, M_DEVBUF); lio->glist_lock = NULL; return (1); } lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE); /* * allocate memory to store virtual and dma base address of * per glist consistent memory */ lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF, M_NOWAIT | M_ZERO); lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF, M_NOWAIT | M_ZERO); if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) { lio_delete_glists(oct, lio); return (1); } for (i = 0; i < num_iqs; i++) { mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF); STAILQ_INIT(&lio->ghead[i]); lio->glists_virt_base[i] = lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize, (vm_paddr_t *)&lio->glists_dma_base[i]); if (lio->glists_virt_base[i] == NULL) { lio_delete_glists(oct, lio); return (1); } for (j = 0; j < lio->tx_qsize; j++) { g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO); if (g == NULL) break; g->sg = (struct lio_sg_entry *)(uintptr_t) ((uint64_t)(uintptr_t)lio->glists_virt_base[i] + (j * lio->glist_entry_size)); g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] + (j * lio->glist_entry_size); STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries); } if (j != lio->tx_qsize) { lio_delete_glists(oct, lio); return (1); } } return (0); } void -lio_stop(struct ifnet *ifp) +lio_stop(if_t ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING); if_link_state_change(ifp, LINK_STATE_DOWN); lio->intf_open = 0; lio->linfo.link.s.link_up = 0; lio->link_changes++; lio_send_rx_ctrl_cmd(lio, 0); /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); lio_dev_info(oct, "Interface is stopped\n"); } static void lio_check_rx_oom_status(struct lio *lio) { struct lio_droq *droq; struct octeon_device *oct = lio->oct_dev; int desc_refilled; int q, q_no = 0; for (q = 0; q < oct->num_oqs; q++) { q_no = 
lio->linfo.rxpciq[q].s.q_no; droq = oct->droq[q_no]; if (droq == NULL) continue; if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) { mtx_lock(&droq->lock); desc_refilled = lio_droq_refill(oct, droq); /* * Flush the droq descriptor data to memory to be sure * that when we update the credits the data in memory * is accurate. */ wmb(); lio_write_csr32(oct, droq->pkts_credit_reg, desc_refilled); /* make sure mmio write completes */ __compiler_membar(); mtx_unlock(&droq->lock); } } } static void lio_poll_check_rx_oom_status(void *arg, int pending __unused) { struct lio_tq *rx_status_tq = arg; struct lio *lio = rx_status_tq->ctxptr; if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) lio_check_rx_oom_status(lio); taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work, lio_ms_to_ticks(50)); } static int -lio_setup_rx_oom_poll_fn(struct ifnet *ifp) +lio_setup_rx_oom_poll_fn(if_t ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_tq *rx_status_tq; rx_status_tq = &lio->rx_status_tq; rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK, taskqueue_thread_enqueue, &rx_status_tq->tq); if (rx_status_tq->tq == NULL) { lio_dev_err(oct, "unable to create lio rx oom status tq\n"); return (-1); } TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0, lio_poll_check_rx_oom_status, (void *)rx_status_tq); rx_status_tq->ctxptr = lio; taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET, "lio%d_rx_oom_status", oct->octeon_id); taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work, lio_ms_to_ticks(50)); return (0); } static void -lio_cleanup_rx_oom_poll_fn(struct ifnet *ifp) +lio_cleanup_rx_oom_poll_fn(if_t ifp) { struct lio *lio = if_getsoftc(ifp); if (lio->rx_status_tq.tq != NULL) { while (taskqueue_cancel_timeout(lio->rx_status_tq.tq, &lio->rx_status_tq.work, NULL)) taskqueue_drain_timeout(lio->rx_status_tq.tq, &lio->rx_status_tq.work); taskqueue_free(lio->rx_status_tq.tq); lio->rx_status_tq.tq = NULL; } } static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx) { - struct ifnet *ifp = oct->props.ifp; + if_t ifp = oct->props.ifp; struct lio *lio; if (ifp == NULL) { lio_dev_err(oct, "%s No ifp ptr for index %d\n", __func__, ifidx); return; } lio = if_getsoftc(ifp); lio_ifstate_set(lio, LIO_IFSTATE_DETACH); lio_dev_dbg(oct, "NIC device cleanup\n"); if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING) lio_stop(ifp); if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); if (lio_wait_for_oq_pkts(oct)) lio_dev_err(oct, "OQ had pending packets\n"); if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED) ether_ifdetach(ifp); lio_tcp_lro_free(oct, ifp); lio_cleanup_rx_oom_poll_fn(ifp); lio_delete_glists(oct, lio); EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach); EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach); free(lio, M_DEVBUF); if_free(ifp); oct->props.gmxport = -1; oct->props.ifp = NULL; } static void -print_link_info(struct ifnet *ifp) +print_link_info(if_t ifp) { struct lio *lio = if_getsoftc(ifp); if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) && lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { struct octeon_link_info *linfo = &lio->linfo; if (linfo->link.s.link_up) { lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n", linfo->link.s.speed, (linfo->link.s.duplex) ? 
"Full" : "Half"); } else { lio_dev_info(lio->oct_dev, "Link Down\n"); } } } static inline void -lio_update_link_status(struct ifnet *ifp, union octeon_link_status *ls) +lio_update_link_status(if_t ifp, union octeon_link_status *ls) { struct lio *lio = if_getsoftc(ifp); int changed = (lio->linfo.link.link_status64 != ls->link_status64); lio->linfo.link.link_status64 = ls->link_status64; if ((lio->intf_open) && (changed)) { print_link_info(ifp); lio->link_changes++; if (lio->linfo.link.s.link_up) if_link_state_change(ifp, LINK_STATE_UP); else if_link_state_change(ifp, LINK_STATE_DOWN); } } /* * \brief Callback for rx ctrl * @param status status of request * @param buf pointer to resp structure */ static void lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf) { struct lio_soft_command *sc = (struct lio_soft_command *)buf; struct lio_rx_ctl_context *ctx; ctx = (struct lio_rx_ctl_context *)sc->ctxptr; oct = lio_get_device(ctx->octeon_id); if (status) lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n", LIO_CAST64(status)); ctx->cond = 1; /* * This barrier is required to be sure that the response has been * written fully before waking up the handler */ wmb(); } static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct lio_soft_command *sc; struct lio_rx_ctl_context *ctx; union octeon_cmd *ncmd; struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; int ctx_size = sizeof(struct lio_rx_ctl_context); int retval; if (oct->props.rx_on == start_stop) return; sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size); if (sc == NULL) return; ncmd = (union octeon_cmd *)sc->virtdptr; ctx = (struct lio_rx_ctl_context *)sc->ctxptr; ctx->cond = 0; ctx->octeon_id = lio_get_device_id(oct); ncmd->cmd64 = 0; ncmd->s.cmd = LIO_CMD_RX_CTL; ncmd->s.param1 = start_stop; lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0, 0, 0); sc->callback = lio_rx_ctl_callback; sc->callback_arg = sc; sc->wait_time = 5000; retval = lio_send_soft_command(oct, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_dev_err(oct, "Failed to send RX Control message\n"); } else { /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
*/ lio_sleep_cond(oct, &ctx->cond); oct->props.rx_on = start_stop; } lio_free_soft_command(oct, sc); } static void -lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid) +lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; if (if_getsoftc(ifp) != arg) /* Not our event */ return; if ((vid == 0) || (vid > 4095)) /* Invalid */ return; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); } } static void -lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, uint16_t vid) +lio_vlan_rx_kill_vid(void *arg, if_t ifp, uint16_t vid) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; if (if_getsoftc(ifp) != arg) /* Not our event */ return; if ((vid == 0) || (vid > 4095)) /* Invalid */ return; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Kill VLAN filter failed in core (ret: 0x%x)\n", ret); } } static int lio_wait_for_oq_pkts(struct octeon_device *oct) { int i, pending_pkts, pkt_cnt = 0, retry = 100; do { pending_pkts = 0; for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]); if (pkt_cnt > 0) { pending_pkts += pkt_cnt; taskqueue_enqueue(oct->droq[i]->droq_taskqueue, &oct->droq[i]->droq_task); } } pkt_cnt = 0; lio_sleep_timeout(1); } while (retry-- && pending_pkts); return (pkt_cnt); } static void lio_destroy_resources(struct octeon_device *oct) { int i, refcount; switch (atomic_load_acq_int(&oct->status)) { case LIO_DEV_RUNNING: case LIO_DEV_CORE_OK: /* No more instructions will be forwarded. */ atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET); oct->app_mode = LIO_DRV_INVALID_APP; lio_dev_dbg(oct, "Device state is now %s\n", lio_get_state_string(&oct->status)); lio_sleep_timeout(100); /* fallthrough */ case LIO_DEV_HOST_OK: /* fallthrough */ case LIO_DEV_CONSOLE_INIT_DONE: /* Remove any consoles */ lio_remove_consoles(oct); /* fallthrough */ case LIO_DEV_IO_QUEUES_DONE: if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); /* * Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. 
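 * lio_wait_for_oq_pkts() re-queues the DROQ task for any queue that
 * still reports packets and retries at roughly 1 ms intervals, up to
 * 100 times, before giving up.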
*/ oct->fn_list.disable_io_queues(oct); if (lio_wait_for_oq_pkts(oct)) lio_dev_err(oct, "OQ had pending packets\n"); /* fallthrough */ case LIO_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); if (oct->msix_on) { for (i = 0; i < oct->num_msix_irqs - 1; i++) { if (oct->ioq_vector[i].tag != NULL) { bus_teardown_intr(oct->device, oct->ioq_vector[i].msix_res, oct->ioq_vector[i].tag); oct->ioq_vector[i].tag = NULL; } if (oct->ioq_vector[i].msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->ioq_vector[i].vector, oct->ioq_vector[i].msix_res); oct->ioq_vector[i].msix_res = NULL; } } /* non-iov vector's argument is oct struct */ if (oct->tag != NULL) { bus_teardown_intr(oct->device, oct->msix_res, oct->tag); oct->tag = NULL; } if (oct->msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->aux_vector, oct->msix_res); oct->msix_res = NULL; } pci_release_msi(oct->device); } /* fallthrough */ case LIO_DEV_IN_RESET: case LIO_DEV_DROQ_INIT_DONE: /* Wait for any pending operations */ lio_mdelay(100); for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; lio_delete_droq(oct, i); } /* fallthrough */ case LIO_DEV_RESP_LIST_INIT_DONE: for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) { if (oct->droq[i] != NULL) { free(oct->droq[i], M_DEVBUF); oct->droq[i] = NULL; } } lio_delete_response_list(oct); /* fallthrough */ case LIO_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; lio_delete_instr_queue(oct, i); } /* fallthrough */ case LIO_DEV_MSIX_ALLOC_VECTOR_DONE: for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) { if (oct->instr_queue[i] != NULL) { free(oct->instr_queue[i], M_DEVBUF); oct->instr_queue[i] = NULL; } } lio_free_ioq_vector(oct); /* fallthrough */ case LIO_DEV_SC_BUFF_POOL_INIT_DONE: lio_free_sc_buffer_pool(oct); /* fallthrough */ case LIO_DEV_DISPATCH_INIT_DONE: lio_delete_dispatch_list(oct); /* fallthrough */ case LIO_DEV_PCI_MAP_DONE: refcount = lio_deregister_device(oct); if (fw_type_is_none()) lio_pci_flr(oct); if (!refcount) oct->fn_list.soft_reset(oct); lio_unmap_pci_barx(oct, 0); lio_unmap_pci_barx(oct, 1); /* fallthrough */ case LIO_DEV_PCI_ENABLE_DONE: /* Disable the device, releasing the PCI INT */ pci_disable_busmaster(oct->device); /* fallthrough */ case LIO_DEV_BEGIN_STATE: break; } /* end switch (oct->status) */ } diff --git a/sys/dev/liquidio/lio_network.h b/sys/dev/liquidio/lio_network.h index 5a843e4cf8ae..b29fc200a510 100644 --- a/sys/dev/liquidio/lio_network.h +++ b/sys/dev/liquidio/lio_network.h @@ -1,293 +1,293 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /* \file lio_network.h * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module. */ #ifndef __LIO_NETWORK_H__ #define __LIO_NETWORK_H__ #include "lio_rss.h" #define LIO_MIN_MTU_SIZE 72 #define LIO_MAX_MTU_SIZE (LIO_MAX_FRM_SIZE - LIO_FRM_HEADER_SIZE) #define LIO_MAX_SG 64 #define LIO_MAX_FRAME_SIZE 60000 struct lio_fw_stats_resp { uint64_t rh; struct octeon_link_stats stats; uint64_t status; }; /* LiquidIO per-interface network private data */ struct lio { /* State of the interface. Rx/Tx happens only in the RUNNING state. */ int ifstate; /* * Octeon Interface index number. This device will be represented as * oct in the system. */ int ifidx; /* Octeon Input queue to use to transmit for this network interface. */ int txq; /* * Octeon Output queue from which pkts arrive * for this network interface. */ int rxq; /* Guards each glist */ struct mtx *glist_lock; #define LIO_DEFAULT_STATS_INTERVAL 10000 /* callout timer for stats */ struct callout stats_timer; /* Stats Update Interval in milli Seconds */ uint16_t stats_interval; /* IRQ coalescing driver stats */ struct octeon_intrmod_cfg intrmod_cfg; /* Array of gather component linked lists */ struct lio_stailq_head *ghead; void **glists_virt_base; vm_paddr_t *glists_dma_base; uint32_t glist_entry_size; /* Pointer to the octeon device structure. */ struct octeon_device *oct_dev; - struct ifnet *ifp; + if_t ifp; struct ifmedia ifmedia; int if_flags; /* Link information sent by the core application for this interface. */ struct octeon_link_info linfo; /* counter of link changes */ uint64_t link_changes; /* Size of Tx queue for this octeon device. */ uint32_t tx_qsize; /* Size of Rx queue for this octeon device. */ uint32_t rx_qsize; /* Size of MTU this octeon device. */ uint32_t mtu; /* msg level flag per interface. */ uint32_t msg_enable; /* Interface info */ uint32_t intf_open; /* task queue for rx oom status */ struct lio_tq rx_status_tq; /* VLAN Filtering related */ eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; #ifdef RSS struct lio_rss_params_set rss_set; #endif /* RSS */ }; #define LIO_MAX_CORES 12 /* * \brief Enable or disable feature * @param ifp pointer to network device * @param cmd Command that just requires acknowledgment * @param param1 Parameter to command */ -int lio_set_feature(struct ifnet *ifp, int cmd, uint16_t param1); +int lio_set_feature(if_t ifp, int cmd, uint16_t param1); /* * \brief Link control command completion callback * @param nctrl_ptr pointer to control packet structure * * This routine is called by the callback function when a ctrl pkt sent to * core app completes. The nctrl_ptr contains a copy of the command type * and data sent to the core app. 
This routine is only called if the ctrl * pkt was sent successfully to the core app. */ void lio_ctrl_cmd_completion(void *nctrl_ptr); int lio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx, uint32_t num_iqs, uint32_t num_oqs); int lio_setup_interrupt(struct octeon_device *oct, uint32_t num_ioqs); static inline void * lio_recv_buffer_alloc(uint32_t size) { struct mbuf *mb = NULL; mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size); if (mb != NULL) mb->m_pkthdr.len = mb->m_len = size; return ((void *)mb); } static inline void lio_recv_buffer_free(void *buffer) { m_freem((struct mbuf *)buffer); } static inline int lio_get_order(unsigned long size) { int order; size = (size - 1) >> PAGE_SHIFT; order = 0; while (size) { order++; size >>= 1; } return (order); } static inline void * lio_dma_alloc(size_t size, vm_paddr_t *dma_handle) { size_t align; void *mem; align = PAGE_SIZE << lio_get_order(size); mem = kmem_alloc_contig(size, M_WAITOK, 0, ~0ul, align, 0, VM_MEMATTR_DEFAULT); if (mem != NULL) *dma_handle = vtophys(mem); else *dma_handle = 0; return (mem); } static inline void lio_dma_free(size_t size, void *cpu_addr) { kmem_free(cpu_addr, size); } static inline uint64_t lio_map_ring(device_t dev, void *buf, uint32_t size) { vm_paddr_t dma_addr; dma_addr = vtophys(((struct mbuf *)buf)->m_data); return ((uint64_t)dma_addr); } /* * \brief check interface state * @param lio per-network private data * @param state_flag flag state to check */ static inline int lio_ifstate_check(struct lio *lio, int state_flag) { return (atomic_load_acq_int(&lio->ifstate) & state_flag); } /* * \brief set interface state * @param lio per-network private data * @param state_flag flag state to set */ static inline void lio_ifstate_set(struct lio *lio, int state_flag) { atomic_store_rel_int(&lio->ifstate, (atomic_load_acq_int(&lio->ifstate) | state_flag)); } /* * \brief clear interface state * @param lio per-network private data * @param state_flag flag state to clear */ static inline void lio_ifstate_reset(struct lio *lio, int state_flag) { atomic_store_rel_int(&lio->ifstate, (atomic_load_acq_int(&lio->ifstate) & ~(state_flag))); } /* * \brief wait for all pending requests to complete * @param oct Pointer to Octeon device * * Called during shutdown sequence */ static inline int lio_wait_for_pending_requests(struct octeon_device *oct) { int i, pcount = 0; for (i = 0; i < 100; i++) { pcount = atomic_load_acq_int( &oct->response_list[LIO_ORDERED_SC_LIST]. pending_req_count); if (pcount) lio_sleep_timeout(100); else break; } if (pcount) return (1); return (0); } #endif /* __LIO_NETWORK_H__ */ diff --git a/sys/dev/liquidio/lio_rxtx.c b/sys/dev/liquidio/lio_rxtx.c index c966797c5e3c..456fcc3449d6 100644 --- a/sys/dev/liquidio/lio_rxtx.c +++ b/sys/dev/liquidio/lio_rxtx.c @@ -1,325 +1,325 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. 
nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_ctrl.h" #include "lio_main.h" #include "lio_network.h" #include "lio_rxtx.h" int lio_xmit(struct lio *lio, struct lio_instr_queue *iq, struct mbuf **m_headp) { struct lio_data_pkt ndata; union lio_cmd_setup cmdsetup; struct lio_mbuf_free_info *finfo = NULL; struct octeon_device *oct = iq->oct_dev; struct lio_iq_stats *stats; struct octeon_instr_irh *irh; struct lio_request_list *tx_buf; union lio_tx_info *tx_info; struct mbuf *m_head; bus_dma_segment_t segs[LIO_MAX_SG]; bus_dmamap_t map; uint64_t dptr = 0; uint32_t tag = 0; int iq_no = 0; int nsegs; int status = 0; iq_no = iq->txpciq.s.q_no; tag = iq_no; stats = &oct->instr_queue[iq_no]->stats; tx_buf = iq->request_list + iq->host_write_index; /* * Check for all conditions in which the current packet cannot be * transmitted. */ if (!(atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING) || (!lio->linfo.link.s.link_up)) { lio_dev_info(oct, "Transmit failed link_status : %d\n", lio->linfo.link.s.link_up); status = ENETDOWN; goto drop_packet; } if (lio_iq_is_full(oct, iq_no)) { /* Defer sending if queue is full */ lio_dev_dbg(oct, "Transmit failed iq:%d full\n", iq_no); stats->tx_iq_busy++; return (ENOBUFS); } map = tx_buf->map; status = bus_dmamap_load_mbuf_sg(iq->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (status == EFBIG) { struct mbuf *m; m = m_defrag(*m_headp, M_NOWAIT); if (m == NULL) { stats->mbuf_defrag_failed++; goto drop_packet; } *m_headp = m; status = bus_dmamap_load_mbuf_sg(iq->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); } if (status == ENOMEM) { goto retry; } else if (status) { stats->tx_dmamap_fail++; lio_dev_dbg(oct, "bus_dmamap_load_mbuf_sg failed with error %d. iq:%d", status, iq_no); goto drop_packet; } m_head = *m_headp; /* Info used to unmap and free the buffers. */ finfo = &tx_buf->finfo; finfo->map = map; finfo->mb = m_head; /* Prepare the attributes for the data to be passed to OSI. 
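 * The mbuf pointer and its DMA map travel in finfo so that the
 * completion path (lio_free_mbuf()/lio_free_sgmbuf()) can sync, unmap
 * and free the buffer once the hardware is done with it.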
*/ bzero(&ndata, sizeof(struct lio_data_pkt)); ndata.buf = (void *)finfo; ndata.q_no = iq_no; ndata.datasize = m_head->m_pkthdr.len; cmdsetup.cmd_setup64 = 0; cmdsetup.s.iq_no = iq_no; if (m_head->m_pkthdr.csum_flags & CSUM_IP) cmdsetup.s.ip_csum = 1; if ((m_head->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) || (m_head->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))) cmdsetup.s.transport_csum = 1; if (nsegs == 1) { cmdsetup.s.u.datasize = segs[0].ds_len; lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); dptr = segs[0].ds_addr; ndata.cmd.cmd3.dptr = dptr; ndata.reqtype = LIO_REQTYPE_NORESP_NET; } else { struct lio_gather *g; int i; mtx_lock(&lio->glist_lock[iq_no]); g = (struct lio_gather *) lio_delete_first_node(&lio->ghead[iq_no]); mtx_unlock(&lio->glist_lock[iq_no]); if (g == NULL) { lio_dev_err(oct, "Transmit scatter gather: glist null!\n"); goto retry; } cmdsetup.s.gather = 1; cmdsetup.s.u.gatherptrs = nsegs; lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); bzero(g->sg, g->sg_size); i = 0; while (nsegs--) { g->sg[(i >> 2)].ptr[(i & 3)] = segs[i].ds_addr; lio_add_sg_size(&g->sg[(i >> 2)], segs[i].ds_len, (i & 3)); i++; } dptr = g->sg_dma_ptr; ndata.cmd.cmd3.dptr = dptr; finfo->g = g; ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG; } irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; tx_info = (union lio_tx_info *)&ndata.cmd.cmd3.ossp[0]; if (m_head->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) { tx_info->s.gso_size = m_head->m_pkthdr.tso_segsz; tx_info->s.gso_segs = howmany(m_head->m_pkthdr.len, m_head->m_pkthdr.tso_segsz); stats->tx_gso++; } /* HW insert VLAN tag */ if (m_head->m_flags & M_VLANTAG) { irh->priority = m_head->m_pkthdr.ether_vtag >> 13; irh->vlan = m_head->m_pkthdr.ether_vtag & 0xfff; } status = lio_send_data_pkt(oct, &ndata); if (status == LIO_IQ_SEND_FAILED) goto retry; if (tx_info->s.gso_segs) stats->tx_done += tx_info->s.gso_segs; else stats->tx_done++; stats->tx_tot_bytes += ndata.datasize; return (0); retry: return (ENOBUFS); drop_packet: stats->tx_dropped++; lio_dev_err(oct, "IQ%d Transmit dropped: %llu\n", iq_no, LIO_CAST64(stats->tx_dropped)); m_freem(*m_headp); *m_headp = NULL; return (status); } int -lio_mq_start_locked(struct ifnet *ifp, struct lio_instr_queue *iq) +lio_mq_start_locked(if_t ifp, struct lio_instr_queue *iq) { struct lio *lio = if_getsoftc(ifp); struct mbuf *next; int err = 0; if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) || (!lio->linfo.link.s.link_up)) return (-ENETDOWN); /* Process the queue */ while ((next = drbr_peek(ifp, iq->br)) != NULL) { err = lio_xmit(lio, iq, &next); if (err) { if (next == NULL) drbr_advance(ifp, iq->br); else drbr_putback(ifp, iq->br, next); break; } drbr_advance(ifp, iq->br); /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) || (!lio->linfo.link.s.link_up)) break; } return (err); } int -lio_mq_start(struct ifnet *ifp, struct mbuf *m) +lio_mq_start(if_t ifp, struct mbuf *m) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_instr_queue *iq; int err = 0, i; #ifdef RSS uint32_t bucket_id; #endif if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { #ifdef RSS if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), &bucket_id) == 0) { i = bucket_id % oct->num_iqs; if (bucket_id > oct->num_iqs) lio_dev_dbg(oct, "bucket_id (%d) > num_iqs (%d)\n", bucket_id, oct->num_iqs); } else #endif i = m->m_pkthdr.flowid % oct->num_iqs; } else i = curcpu % oct->num_iqs; iq = 
oct->instr_queue[i]; err = drbr_enqueue(ifp, iq->br, m); if (err) return (err); if (mtx_trylock(&iq->enq_lock)) { lio_mq_start_locked(ifp, iq); mtx_unlock(&iq->enq_lock); } return (err); } void -lio_qflush(struct ifnet *ifp) +lio_qflush(if_t ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_instr_queue *iq; struct mbuf *m; int i; for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; iq = oct->instr_queue[i]; mtx_lock(&iq->enq_lock); while ((m = buf_ring_dequeue_sc(iq->br)) != NULL) m_freem(m); mtx_unlock(&iq->enq_lock); } if_qflush(ifp); } diff --git a/sys/dev/liquidio/lio_rxtx.h b/sys/dev/liquidio/lio_rxtx.h index cff25af147fb..b4059b35b1ea 100644 --- a/sys/dev/liquidio/lio_rxtx.h +++ b/sys/dev/liquidio/lio_rxtx.h @@ -1,86 +1,86 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _LIO_RXTX_H_ #define _LIO_RXTX_H_ /* Bit mask values for lio->ifstate */ #define LIO_IFSTATE_DROQ_OPS 0x01 #define LIO_IFSTATE_REGISTERED 0x02 #define LIO_IFSTATE_RUNNING 0x04 #define LIO_IFSTATE_DETACH 0x08 #define LIO_IFSTATE_RESETTING 0x10 /* * Structure of a node in list of gather components maintained by * NIC driver for each network device. */ struct lio_gather { /* List manipulation. Next and prev pointers. */ struct lio_stailq_node node; /* Size of the gather component at sg in bytes. */ int sg_size; /* * Gather component that can accommodate max sized fragment list * received from the IP layer. 
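 * Each lio_sg_entry packs four buffer pointers plus their sizes, which
 * is why lio_xmit() fills the list as g->sg[i >> 2].ptr[i & 3].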
*/ struct lio_sg_entry *sg; uint64_t sg_dma_ptr; }; union lio_tx_info { uint64_t tx_info64; struct { #if _BYTE_ORDER == _BIG_ENDIAN uint16_t gso_size; uint16_t gso_segs; uint32_t reserved; #else /* _BYTE_ORDER == _LITTLE_ENDIAN */ uint32_t reserved; uint16_t gso_segs; uint16_t gso_size; #endif } s; }; int lio_xmit(struct lio *lio, struct lio_instr_queue *iq, struct mbuf **m_headp); -int lio_mq_start_locked(struct ifnet *ifp, struct lio_instr_queue *iq); -int lio_mq_start(struct ifnet *ifp, struct mbuf *m); -void lio_qflush(struct ifnet *ifp); +int lio_mq_start_locked(if_t ifp, struct lio_instr_queue *iq); +int lio_mq_start(if_t ifp, struct mbuf *m); +void lio_qflush(if_t ifp); #endif /* _LIO_RXTX_H_ */ diff --git a/sys/dev/liquidio/lio_sysctl.c b/sys/dev/liquidio/lio_sysctl.c index cee520e63b60..c7a9bc5cda0e 100644 --- a/sys/dev/liquidio/lio_sysctl.c +++ b/sys/dev/liquidio/lio_sysctl.c @@ -1,1973 +1,1973 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_network.h" #include "lio_ctrl.h" #include "cn23xx_pf_device.h" #include "lio_image.h" #include "lio_main.h" #include "lio_rxtx.h" #include "lio_ioctl.h" #define LIO_OFF_PAUSE 0 #define LIO_RX_PAUSE 1 #define LIO_TX_PAUSE 2 #define LIO_REGDUMP_LEN 4096 #define LIO_REGDUMP_LEN_23XX 49248 #define LIO_REGDUMP_LEN_XXXX LIO_REGDUMP_LEN_23XX #define LIO_USE_ADAPTIVE_RX_COALESCE 1 #define LIO_USE_ADAPTIVE_TX_COALESCE 2 #define LIO_RX_COALESCE_USECS 3 #define LIO_RX_MAX_COALESCED_FRAMES 4 #define LIO_TX_MAX_COALESCED_FRAMES 8 #define LIO_PKT_RATE_LOW 12 #define LIO_RX_COALESCE_USECS_LOW 13 #define LIO_RX_MAX_COALESCED_FRAMES_LOW 14 #define LIO_TX_MAX_COALESCED_FRAMES_LOW 16 #define LIO_PKT_RATE_HIGH 17 #define LIO_RX_COALESCE_USECS_HIGH 18 #define LIO_RX_MAX_COALESCED_FRAMES_HIGH 19 #define LIO_TX_MAX_COALESCED_FRAMES_HIGH 21 #define LIO_RATE_SAMPLE_INTERVAL 22 #define LIO_SET_RING_RX 1 #define LIO_SET_RING_TX 2 static int lio_get_eeprom(SYSCTL_HANDLER_ARGS); static int lio_get_set_pauseparam(SYSCTL_HANDLER_ARGS); static int lio_get_regs(SYSCTL_HANDLER_ARGS); static int lio_cn23xx_pf_read_csr_reg(char *s, struct octeon_device *oct); static int lio_get_set_fwmsglevel(SYSCTL_HANDLER_ARGS); static int lio_set_stats_interval(SYSCTL_HANDLER_ARGS); static void lio_get_fw_stats(void *arg); static int lio_get_set_intr_coalesce(SYSCTL_HANDLER_ARGS); static int lio_get_intrmod_cfg(struct lio *lio, struct octeon_intrmod_cfg *intr_cfg); static int lio_get_ringparam(SYSCTL_HANDLER_ARGS); static int lio_set_ringparam(SYSCTL_HANDLER_ARGS); static int lio_get_channels(SYSCTL_HANDLER_ARGS); static int lio_set_channels(SYSCTL_HANDLER_ARGS); static int lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs); struct lio_intrmod_context { int octeon_id; volatile int cond; int status; }; struct lio_intrmod_resp { uint64_t rh; struct octeon_intrmod_cfg intrmod; uint64_t status; }; static int -lio_send_queue_count_update(struct ifnet *ifp, uint32_t num_queues) +lio_send_queue_count_update(if_t ifp, uint32_t num_queues) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL; nctrl.ncmd.s.param1 = num_queues; nctrl.ncmd.s.param2 = num_queues; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Failed to send Queue reset command (ret: 0x%x)\n", ret); return (-1); } return (0); } /* Add sysctl variables to the system, one per statistic.
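 * The nodes hang off the device's own sysctl tree, so, assuming the
 * device attached as lio0, they appear under the dev.lio.0 prefix,
 * e.g. "sysctl dev.lio.0.fwversion" or "sysctl dev.lio.0.tx_budget".
 * (The exact prefix follows device_get_name()/device_get_unit().)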
*/ void lio_add_hw_stats(struct lio *lio) { struct octeon_device *oct_dev = lio->oct_dev; device_t dev = oct_dev->device; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct sysctl_oid *stat_node, *queue_node, *root_node; struct sysctl_oid_list *stat_list, *queue_list, *root_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; callout_reset(&lio->stats_timer, lio_ms_to_ticks(lio->stats_interval), lio_get_fw_stats, lio); SYSCTL_ADD_STRING(ctx, child, OID_AUTO, "fwversion", CTLFLAG_RD, oct_dev->fw_info.lio_firmware_version, 0, "Firmware version"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "stats_interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, lio, 0, lio_set_stats_interval, "I", "Set stats update interval in milliseconds"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "link_state_changes", CTLFLAG_RD, &lio->link_changes, "Link Change Counter"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eeprom-dump", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, lio, 0, lio_get_eeprom, "A", "EEPROM information"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, lio, 0, lio_get_set_pauseparam, "I", "Get and set pause parameters.\n" \ "0 - off\n" \ "1 - rx pause\n" \ "2 - tx pause\n" \ "3 - rx and tx pause"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "register-dump", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, lio, 0, lio_get_regs, "A", "Dump registers in raw format"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fwmsglevel", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, lio, 0, lio_get_set_fwmsglevel, "I", "Get or set firmware message level"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxq_descriptors", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, lio, LIO_SET_RING_RX, lio_set_ringparam, "I", "Set RX ring parameter"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txq_descriptors", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, lio, LIO_SET_RING_TX, lio_set_ringparam, "I", "Set TX ring parameter"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "max_rxq_descriptors", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, lio, LIO_SET_RING_RX, lio_get_ringparam, "I", "Max RX descriptors"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "max_txq_descriptors", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, lio, LIO_SET_RING_TX, lio_get_ringparam, "I", "Max TX descriptors"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "active_queues", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, lio, 0, lio_set_channels, "I", "Set channels information"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "max_queues", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, lio, 0, lio_get_channels, "I", "Get channels information"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_budget", CTLFLAG_RW, &oct_dev->tx_budget, 0, "TX process pkt budget"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_budget", CTLFLAG_RW, &oct_dev->rx_budget, 0, "RX process pkt budget"); /* IRQ Coalescing Parameters */ root_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "coalesce", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Get and Set Coalesce"); root_list = SYSCTL_CHILDREN(root_node); if (lio_get_intrmod_cfg(lio, &lio->intrmod_cfg)) lio_dev_info(oct_dev, "Coalescing driver update failed!\n"); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "sample-interval", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RATE_SAMPLE_INTERVAL, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "tx-frame-high", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio,
LIO_TX_MAX_COALESCED_FRAMES_HIGH, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-frame-high", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_MAX_COALESCED_FRAMES_HIGH, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-usecs-high", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_COALESCE_USECS_HIGH, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "pkt-rate-high", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_PKT_RATE_HIGH, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "tx-frame-low", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_TX_MAX_COALESCED_FRAMES_LOW, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-frame-low", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_MAX_COALESCED_FRAMES_LOW, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-usecs-low", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_COALESCE_USECS_LOW, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "pkt-rate-low", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_PKT_RATE_LOW, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "tx-frames", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_TX_MAX_COALESCED_FRAMES, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-frames", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_MAX_COALESCED_FRAMES, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "rx-usecs", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_RX_COALESCE_USECS, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "adaptive-tx", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_USE_ADAPTIVE_TX_COALESCE, lio_get_set_intr_coalesce, "QU", NULL); SYSCTL_ADD_PROC(ctx, root_list, OID_AUTO, "adaptive-rx", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, lio, LIO_USE_ADAPTIVE_RX_COALESCE, lio_get_set_intr_coalesce, "QU", NULL); /* Root Node of all the Stats */ root_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Root Node of all the Stats"); root_list = SYSCTL_CHILDREN(root_node); /* Firmware Tx Stats */ stat_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, "fwtx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Firmware Tx Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_total_sent", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_total_sent, "Firmware Total Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_total_fwd", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_total_fwd, "Firmware Total Packets Forwarded"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_total_fwd_bytes", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_total_fwd_bytes, "Firmware Total Bytes Forwarded"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_err_pko", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_err_pko, "Firmware Tx PKO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_err_pki", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_err_pki, "Firmware Tx PKI Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_err_link", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_err_link, "Firmware Tx Link Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_err_drop", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_err_drop, "Firmware Tx Packets 
Dropped"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fw_tso", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_tso, "Firmware Tx TSO"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_tso_packets", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_tso_fwd, "Firmware Tx TSO Packets"); //SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_tso_err", CTLFLAG_RD, //&oct_dev->link_stats.fromhost.fw_tso_err, //"Firmware Tx TSO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_vxlan", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fw_tx_vxlan, "Firmware Tx VXLAN"); /* MAC Tx Stats */ stat_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, "mactx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Tx Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_total_pkts", CTLFLAG_RD, &oct_dev->link_stats.fromhost.total_pkts_sent, "Link-Level Total Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_total_bytes", CTLFLAG_RD, &oct_dev->link_stats.fromhost.total_bytes_sent, "Link-Level Total Bytes Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_mcast_pkts", CTLFLAG_RD, &oct_dev->link_stats.fromhost.mcast_pkts_sent, "Link-Level Multicast Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_bcast_pkts", CTLFLAG_RD, &oct_dev->link_stats.fromhost.bcast_pkts_sent, "Link-Level Broadcast Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_ctl_packets", CTLFLAG_RD, &oct_dev->link_stats.fromhost.ctl_sent, "Link-Level Control Packets Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_total_collisions", CTLFLAG_RD, &oct_dev->link_stats.fromhost.total_collisions, "Link-Level Tx Total Collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_one_collision", CTLFLAG_RD, &oct_dev->link_stats.fromhost.one_collision_sent, "Link-Level Tx One Collision Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_multi_collison", CTLFLAG_RD, &oct_dev->link_stats.fromhost.multi_collision_sent, "Link-Level Tx Multi-Collision Sent"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_max_collision_fail", CTLFLAG_RD, &oct_dev->link_stats.fromhost.max_collision_fail, "Link-Level Tx Max Collision Failed"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_max_deferal_fail", CTLFLAG_RD, &oct_dev->link_stats.fromhost.max_deferral_fail, "Link-Level Tx Max Deferral Failed"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_fifo_err", CTLFLAG_RD, &oct_dev->link_stats.fromhost.fifo_err, "Link-Level Tx FIFO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_tx_runts", CTLFLAG_RD, &oct_dev->link_stats.fromhost.runts, "Link-Level Tx Runts"); /* Firmware Rx Stats */ stat_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, "fwrx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Firmware Rx Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_total_rcvd", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_total_rcvd, "Firmware Total Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_total_fwd", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_total_fwd, "Firmware Total Packets Forwarded"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_jabber_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.jabber_err, "Firmware Rx Jabber Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_l2_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.l2_err, "Firmware Rx L2 Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frame_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.frame_err, "Firmware Rx Frame Errors"); 
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_err_pko", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_err_pko, "Firmware Rx PKO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_err_link", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_err_link, "Firmware Rx Link Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_err_drop", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_err_drop, "Firmware Rx Dropped"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_vxlan", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_rx_vxlan, "Firmware Rx VXLAN"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_vxlan_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_rx_vxlan_err, "Firmware Rx VXLAN Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_pkts", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_pkts, "Firmware Rx LRO Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_bytes", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_octs, "Firmware Rx LRO Bytes"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_total_lro", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_total_lro, "Firmware Rx Total LRO"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts, "Firmware Rx LRO Aborts"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts_port", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts_port, "Firmware Rx LRO Aborts Port"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts_seq", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts_seq, "Firmware Rx LRO Aborts Sequence"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts_tsval", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts_tsval, "Firmware Rx LRO Aborts tsval"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_lro_aborts_timer", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fw_lro_aborts_timer, "Firmware Rx LRO Aborts Timer"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_fwd_rate", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fwd_rate, "Firmware Rx Packets Forward Rate"); /* MAC Rx Stats */ stat_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, "macrx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Rx Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_total_rcvd", CTLFLAG_RD, &oct_dev->link_stats.fromwire.total_rcvd, "Link-Level Total Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_bytes", CTLFLAG_RD, &oct_dev->link_stats.fromwire.bytes_rcvd, "Link-Level Total Bytes Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_total_bcst", CTLFLAG_RD, &oct_dev->link_stats.fromwire.total_bcst, "Link-Level Total Broadcast"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_total_mcst", CTLFLAG_RD, &oct_dev->link_stats.fromwire.total_mcst, "Link-Level Total Multicast"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_runts", CTLFLAG_RD, &oct_dev->link_stats.fromwire.runts, "Link-Level Rx Runts"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_ctl_packets", CTLFLAG_RD, &oct_dev->link_stats.fromwire.ctl_rcvd, "Link-Level Rx Control Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_fifo_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fifo_err, "Link-Level Rx FIFO Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_dma_drop", CTLFLAG_RD, &oct_dev->link_stats.fromwire.dmac_drop, "Link-Level Rx DMA Dropped"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mac_rx_fcs_err", CTLFLAG_RD, &oct_dev->link_stats.fromwire.fcs_err, "Link-Level Rx FCS Errors"); /* TX 
*/ for (int i = 0; i < oct_dev->num_iqs; i++) { if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) continue; snprintf(namebuf, QUEUE_NAME_LEN, "tx-%d", i); queue_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Input Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); /* packets to network port */ /* # of packets tx to network */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_done, "Number of Packets Tx to Network"); /* # of bytes tx to network */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_tot_bytes, "Number of Bytes Tx to Network"); /* # of packets dropped */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_dropped, "Number of Tx Packets Dropped"); /* # of tx fails due to queue full */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "iq_busy", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_iq_busy, "Number of Tx Fails Due to Queue Full"); /* scatter gather entries sent */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "sgentry_sent", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.sgentry_sent, "Scatter Gather Entries Sent"); /* instruction to firmware: data and control */ /* # of instructions to the queue */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_instr_posted", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.instr_posted, "Number of Instructions to The Queue"); /* # of instructions processed */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_instr_processed", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.instr_processed, "Number of Instructions Processed"); /* # of instructions could not be processed */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_instr_dropped", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.instr_dropped, "Number of Instructions Dropped"); /* bytes sent through the queue */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_bytes_sent", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.bytes_sent, "Bytes Sent Through The Queue"); /* tso request */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_gso, "TSO Request"); /* vxlan request */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "vxlan", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_vxlan, "VXLAN Request"); /* txq restart */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "txq_restart", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_restart, "TxQ Restart"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_fail", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.tx_dmamap_fail, "TxQ DMA Map Failed"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &oct_dev->instr_queue[i]->stats.mbuf_defrag_failed, "TxQ Defrag Failed"); } /* RX */ for (int i = 0; i < oct_dev->num_oqs; i++) { if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) continue; snprintf(namebuf, QUEUE_NAME_LEN, "rx-%d", i); queue_node = SYSCTL_ADD_NODE(ctx, root_list, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Output Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); /* packets sent to TCP/IP network stack */ /* # of packets to network stack */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_pkts_received, "Number of Packets to Network Stack"); /* # of bytes to network stack */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_bytes_received, "Number of Bytes to Network Stack"); /* # of packets dropped */
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped_nomem", CTLFLAG_RD, &oct_dev->droq[i]->stats.dropped_nomem, "Packets Dropped Due to No Memory"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped_toomany", CTLFLAG_RD, &oct_dev->droq[i]->stats.dropped_toomany, "Packets dropped, Too Many Pkts to Process"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_dropped", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_dropped, "Packets Dropped due to Receive path failures"); /* control and data path */ /* # packets sent to stack from this queue. */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_pkts_received", CTLFLAG_RD, &oct_dev->droq[i]->stats.pkts_received, "Number of Packets Received"); /* # Bytes sent to stack from this queue. */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_bytes_received", CTLFLAG_RD, &oct_dev->droq[i]->stats.bytes_received, "Number of Bytes Received"); /* Packets dropped due to no dispatch function. */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "fw_dropped_nodispatch", CTLFLAG_RD, &oct_dev->droq[i]->stats.dropped_nodispatch, "Packets Dropped, No Dispatch Function"); /* Rx VXLAN */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "vxlan", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_vxlan, "Rx VXLAN"); /* # failures of lio_recv_buffer_alloc */ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "buffer_alloc_failure", CTLFLAG_RD, &oct_dev->droq[i]->stats.rx_alloc_failure, "Number of Failures of lio_recv_buffer_alloc"); } } static int lio_get_eeprom(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct_dev = lio->oct_dev; struct lio_board_info *board_info; char buf[512]; board_info = (struct lio_board_info *)(&oct_dev->boardinfo); if (oct_dev->uboot_len == 0) sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld", board_info->name, board_info->serial_number, LIO_CAST64(board_info->major), LIO_CAST64(board_info->minor)); else { sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n%s", board_info->name, board_info->serial_number, LIO_CAST64(board_info->major), LIO_CAST64(board_info->minor), &oct_dev->uboot_version[oct_dev->uboot_sidx]); } return (sysctl_handle_string(oidp, buf, strlen(buf), req)); } /* * Get and set pause parameters or flow control using sysctl: * 0 - off * 1 - rx pause * 2 - tx pause * 3 - full */
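/*
 * Illustrative use from userland; the OID prefix below assumes the
 * device attached as unit 0 (dev.lio.0) and is only an example:
 *
 *	sysctl dev.lio.0.fc	- report the current flow-control mode
 *	sysctl dev.lio.0.fc=3	- request both RX and TX pause frames
 */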
static int lio_get_set_pauseparam(SYSCTL_HANDLER_ARGS) { /* Note: these drivers do not support autonegotiation. */ struct lio_ctrl_pkt nctrl; struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; struct octeon_link_info *linfo = &lio->linfo; int err, new_pause = LIO_OFF_PAUSE, old_pause = LIO_OFF_PAUSE; int ret = 0; if (oct->chip_id != LIO_CN23XX_PF_VID) return (EINVAL); if (oct->rx_pause) old_pause |= LIO_RX_PAUSE; if (oct->tx_pause) old_pause |= LIO_TX_PAUSE; new_pause = old_pause; err = sysctl_handle_int(oidp, &new_pause, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_pause == new_pause) return (0); if (linfo->link.s.duplex == 0) { /* no flow control for half duplex */ if (new_pause) return (EINVAL); } bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_SET_FLOW_CTL; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; if (new_pause & LIO_RX_PAUSE) { /* enable rx pause */ nctrl.ncmd.s.param1 = 1; } else { /* disable rx pause */ nctrl.ncmd.s.param1 = 0; } if (new_pause & LIO_TX_PAUSE) { /* enable tx pause */ nctrl.ncmd.s.param2 = 1; } else { /* disable tx pause */ nctrl.ncmd.s.param2 = 0; } ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Failed to set pause parameter\n"); return (EINVAL); } oct->rx_pause = new_pause & LIO_RX_PAUSE; oct->tx_pause = new_pause & LIO_TX_PAUSE; return (0); } /* Return register dump to the user app. */ static int lio_get_regs(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; - struct ifnet *ifp = lio->ifp; + if_t ifp = lio->ifp; char *regbuf; int error = EINVAL, len = 0; if (!(if_getflags(ifp) & IFF_DEBUG)) { char debug_info[30] = "Debugging is disabled"; return (sysctl_handle_string(oidp, debug_info, strlen(debug_info), req)); } regbuf = malloc(sizeof(char) * LIO_REGDUMP_LEN_XXXX, M_DEVBUF, M_WAITOK | M_ZERO); if (regbuf == NULL) return (error); switch (oct->chip_id) { case LIO_CN23XX_PF_VID: len += lio_cn23xx_pf_read_csr_reg(regbuf, oct); break; default: len += sprintf(regbuf, "%s Unknown chipid: %d\n", __func__, oct->chip_id); } error = sysctl_handle_string(oidp, regbuf, len, req); free(regbuf, M_DEVBUF); return (error); } static int lio_cn23xx_pf_read_csr_reg(char *s, struct octeon_device *oct) { uint32_t reg; int i, len = 0; uint8_t pf_num = oct->pf_num; /* PCI Window Registers */ len += sprintf(s + len, "\t Octeon CSR Registers\n\n"); /* 0x29030 or 0x29040 */ reg = LIO_CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num); len += sprintf(s + len, "[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n", reg, oct->pcie_port, oct->pf_num, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x27080 or 0x27090 */ reg = LIO_CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n", reg, oct->pcie_port, oct->pf_num, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x27000 or 0x27010 */ reg = LIO_CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n", reg, oct->pcie_port, oct->pf_num, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29120 */ reg = 0x29120; len += sprintf(s + len, "[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x27300 */ reg = 0x27300 + oct->pcie_port * LIO_CN23XX_MAC_INT_OFFSET + (oct->pf_num) * LIO_CN23XX_PF_INT_OFFSET; len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg, oct->pcie_port, oct->pf_num, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x27200 */
reg = 0x27200 + oct->pcie_port * LIO_CN23XX_MAC_INT_OFFSET + (oct->pf_num) * LIO_CN23XX_PF_INT_OFFSET; len += sprintf(s + len, "[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n", reg, oct->pcie_port, oct->pf_num, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29130 */ reg = LIO_CN23XX_SLI_PKT_CNT_INT; len += sprintf(s + len, "[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29140 */ reg = LIO_CN23XX_SLI_PKT_TIME_INT; len += sprintf(s + len, "[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29160 */ reg = 0x29160; len += sprintf(s + len, "[%08x] (SLI_PKT_INT): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29180 */ reg = LIO_CN23XX_SLI_OQ_WMARK; len += sprintf(s + len, "[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x291E0 */ reg = LIO_CN23XX_SLI_PKT_IOQ_RING_RST; len += sprintf(s + len, "[%08x] (SLI_PKT_RING_RST): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29210 */ reg = LIO_CN23XX_SLI_GBL_CONTROL; len += sprintf(s + len, "[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* 0x29220 */ reg = 0x29220; len += sprintf(s + len, "[%08x] (SLI_PKT_BIST_STATUS): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); /* PF only */ if (pf_num == 0) { /* 0x29260 */ reg = LIO_CN23XX_SLI_OUT_BP_EN_W1S; len += sprintf(s + len, "[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); } else if (pf_num == 1) { /* 0x29270 */ reg = LIO_CN23XX_SLI_OUT_BP_EN2_W1S; len += sprintf(s + len, "[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n", reg, LIO_CAST64(lio_read_csr64(oct, reg))); } for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_BUFF_INFO_SIZE(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10040 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(i); len += sprintf(s + len, "[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10080 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_PKTS_CREDIT(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10090 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_SIZE(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10050 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_PKT_CONTROL(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_OUTPUT_CONTROL): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10070 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_BASE_ADDR64(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x100a0 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x100b0 */ for (i = 0; i < LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_OQ_PKTS_SENT(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_CNTS): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x100c0 */ for (i = 0; i <
LIO_CN23XX_PF_MAX_OUTPUT_QUEUES; i++) { reg = 0x100c0 + i * LIO_CN23XX_OQ_OFFSET; len += sprintf(s + len, "[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10000 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_PKT_CONTROL64(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10010 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_BASE_ADDR64(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10020 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_DOORBELL(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10030 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_SIZE(i); len += sprintf(s + len, "[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } /* 0x10040 */ for (i = 0; i < LIO_CN23XX_PF_MAX_INPUT_QUEUES; i++) { reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(i); len += sprintf(s + len, "[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", reg, i, LIO_CAST64(lio_read_csr64(oct, reg))); } return (len); } static int lio_get_ringparam(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint32_t rx_max_pending = 0, tx_max_pending = 0; int err = EINVAL; if (LIO_CN23XX_PF(oct)) { tx_max_pending = LIO_CN23XX_MAX_IQ_DESCRIPTORS; rx_max_pending = LIO_CN23XX_MAX_OQ_DESCRIPTORS; } switch (arg2) { case LIO_SET_RING_RX: err = sysctl_handle_int(oidp, &rx_max_pending, 0, req); break; case LIO_SET_RING_TX: err = sysctl_handle_int(oidp, &tx_max_pending, 0, req); break; } return (err); } static int -lio_reset_queues(struct ifnet *ifp, uint32_t num_qs) +lio_reset_queues(if_t ifp, uint32_t num_qs) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int i, update = 0; if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); /* * Disable the input and output queues now. No more packets will * arrive from Octeon.
*/ oct->fn_list.disable_io_queues(oct); if (num_qs != oct->num_iqs) update = 1; for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; lio_delete_droq(oct, i); } for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; lio_delete_instr_queue(oct, i); } if (oct->fn_list.setup_device_regs(oct)) { lio_dev_err(oct, "Failed to configure device registers\n"); return (-1); } if (lio_setup_io_queues(oct, 0, num_qs, num_qs)) { lio_dev_err(oct, "IO queues initialization failed\n"); return (-1); } if (update && lio_send_queue_count_update(ifp, num_qs)) return (-1); return (0); } static int lio_set_ringparam(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint32_t rx_count, rx_count_old, tx_count, tx_count_old; int err, stopped = 0; if (!LIO_CN23XX_PF(oct)) return (EINVAL); switch (arg2) { case LIO_SET_RING_RX: rx_count = rx_count_old = oct->droq[0]->max_count; err = sysctl_handle_int(oidp, &rx_count, 0, req); if ((err) || (req->newptr == NULL)) return (err); rx_count = min(max(rx_count, LIO_CN23XX_MIN_OQ_DESCRIPTORS), LIO_CN23XX_MAX_OQ_DESCRIPTORS); if (rx_count == rx_count_old) return (0); lio_ifstate_set(lio, LIO_IFSTATE_RESETTING); if (if_getdrvflags(lio->ifp) & IFF_DRV_RUNNING) { lio_stop(lio->ifp); stopped = 1; } /* Change RX DESCS count */ LIO_SET_NUM_RX_DESCS_NIC_IF(lio_get_conf(oct), lio->ifidx, rx_count); break; case LIO_SET_RING_TX: tx_count = tx_count_old = oct->instr_queue[0]->max_count; err = sysctl_handle_int(oidp, &tx_count, 0, req); if ((err) || (req->newptr == NULL)) return (err); tx_count = min(max(tx_count, LIO_CN23XX_MIN_IQ_DESCRIPTORS), LIO_CN23XX_MAX_IQ_DESCRIPTORS); if (tx_count == tx_count_old) return (0); lio_ifstate_set(lio, LIO_IFSTATE_RESETTING); if (if_getdrvflags(lio->ifp) & IFF_DRV_RUNNING) { lio_stop(lio->ifp); stopped = 1; } /* Change TX DESCS count */ LIO_SET_NUM_TX_DESCS_NIC_IF(lio_get_conf(oct), lio->ifidx, tx_count); break; } if (lio_reset_queues(lio->ifp, lio->linfo.num_txpciq)) goto err_lio_reset_queues; lio_irq_reallocate_irqs(oct, lio->linfo.num_txpciq); if (stopped) lio_open(lio); lio_ifstate_reset(lio, LIO_IFSTATE_RESETTING); return (0); err_lio_reset_queues: if (arg2 == LIO_SET_RING_RX && rx_count != rx_count_old) LIO_SET_NUM_RX_DESCS_NIC_IF(lio_get_conf(oct), lio->ifidx, rx_count_old); if (arg2 == LIO_SET_RING_TX && tx_count != tx_count_old) LIO_SET_NUM_TX_DESCS_NIC_IF(lio_get_conf(oct), lio->ifidx, tx_count_old); return (EINVAL); } static int lio_get_channels(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint32_t max_combined = 0; if (LIO_CN23XX_PF(oct)) max_combined = lio->linfo.num_txpciq; return (sysctl_handle_int(oidp, &max_combined, 0, req)); } static int lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) { int i, num_msix_irqs = 0; if (!oct->msix_on) return (0); /* * Disable the input and output queues now. No more packets will * arrive from Octeon. 
*/ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); if (oct->msix_on) { if (LIO_CN23XX_PF(oct)) num_msix_irqs = oct->num_msix_irqs - 1; for (i = 0; i < num_msix_irqs; i++) { if (oct->ioq_vector[i].tag != NULL) { bus_teardown_intr(oct->device, oct->ioq_vector[i].msix_res, oct->ioq_vector[i].tag); oct->ioq_vector[i].tag = NULL; } if (oct->ioq_vector[i].msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->ioq_vector[i].vector, oct->ioq_vector[i].msix_res); oct->ioq_vector[i].msix_res = NULL; } } if (oct->tag != NULL) { bus_teardown_intr(oct->device, oct->msix_res, oct->tag); oct->tag = NULL; } if (oct->msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->aux_vector, oct->msix_res); oct->msix_res = NULL; } pci_release_msi(oct->device); } if (lio_setup_interrupt(oct, num_ioqs)) { lio_dev_info(oct, "Setup interrupt failed\n"); return (1); } /* Enable Octeon device interrupts */ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); return (0); } static int lio_set_channels(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint32_t combined_count, max_combined; int err, stopped = 0; if (strcmp(oct->fw_info.lio_firmware_version, "1.6.1") < 0) { lio_dev_err(oct, "Minimum firmware version required is 1.6.1\n"); return (EINVAL); } combined_count = oct->num_iqs; err = sysctl_handle_int(oidp, &combined_count, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (!combined_count) return (EINVAL); if (LIO_CN23XX_PF(oct)) { max_combined = lio->linfo.num_txpciq; } else { return (EINVAL); } if ((combined_count > max_combined) || (combined_count < 1)) return (EINVAL); if (combined_count == oct->num_iqs) return (0); lio_ifstate_set(lio, LIO_IFSTATE_RESETTING); if (if_getdrvflags(lio->ifp) & IFF_DRV_RUNNING) { lio_stop(lio->ifp); stopped = 1; } if (lio_reset_queues(lio->ifp, combined_count)) return (EINVAL); lio_irq_reallocate_irqs(oct, combined_count); if (stopped) lio_open(lio); lio_ifstate_reset(lio, LIO_IFSTATE_RESETTING); return (0); } static int lio_get_set_fwmsglevel(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; - struct ifnet *ifp = lio->ifp; + if_t ifp = lio->ifp; int err, new_msglvl = 0, old_msglvl = 0; if (lio_ifstate_check(lio, LIO_IFSTATE_RESETTING)) return (ENXIO); old_msglvl = new_msglvl = lio->msg_enable; err = sysctl_handle_int(oidp, &new_msglvl, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_msglvl == new_msglvl) return (0); if (new_msglvl ^ lio->msg_enable) { if (new_msglvl) err = lio_set_feature(ifp, LIO_CMD_VERBOSE_ENABLE, 0); else err = lio_set_feature(ifp, LIO_CMD_VERBOSE_DISABLE, 0); } lio->msg_enable = new_msglvl; return ((err) ? EINVAL : 0); }
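/*
 * Every integer sysctl handler in this file follows the same FreeBSD
 * read-modify-write idiom.  A minimal standalone sketch of that idiom
 * (the "example_val" state is hypothetical, not part of this driver):
 *
 *	static int example_val;
 *
 *	static int
 *	example_handler(SYSCTL_HANDLER_ARGS)
 *	{
 *		int err, new_val = example_val;
 *
 *		// Copy the value out; copy a new value in if one was given.
 *		err = sysctl_handle_int(oidp, &new_val, 0, req);
 *		if (err != 0 || req->newptr == NULL)
 *			return (err);	// read-only access, or copy error
 *		example_val = new_val;	// apply the change
 *		return (0);
 *	}
 */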
static int lio_set_stats_interval(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; int err, new_time = 0, old_time = 0; old_time = new_time = lio->stats_interval; err = sysctl_handle_int(oidp, &new_time, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_time == new_time) return (0); lio->stats_interval = new_time; return (0); } static void lio_fw_stats_callback(struct octeon_device *oct_dev, uint32_t status, void *ptr) { struct lio_soft_command *sc = (struct lio_soft_command *)ptr; struct lio_fw_stats_resp *resp = (struct lio_fw_stats_resp *)sc->virtrptr; struct octeon_rx_stats *rsp_rstats = &resp->stats.fromwire; struct octeon_tx_stats *rsp_tstats = &resp->stats.fromhost; struct octeon_rx_stats *rstats = &oct_dev->link_stats.fromwire; struct octeon_tx_stats *tstats = &oct_dev->link_stats.fromhost; - struct ifnet *ifp = oct_dev->props.ifp; + if_t ifp = oct_dev->props.ifp; struct lio *lio = if_getsoftc(ifp); if ((status != LIO_REQUEST_TIMEOUT) && !resp->status) { lio_swap_8B_data((uint64_t *)&resp->stats, (sizeof(struct octeon_link_stats)) >> 3); /* RX link-level stats */ rstats->total_rcvd = rsp_rstats->total_rcvd; rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; rstats->total_bcst = rsp_rstats->total_bcst; rstats->total_mcst = rsp_rstats->total_mcst; rstats->runts = rsp_rstats->runts; rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; /* Accounts for over/under-run of buffers */ rstats->fifo_err = rsp_rstats->fifo_err; rstats->dmac_drop = rsp_rstats->dmac_drop; rstats->fcs_err = rsp_rstats->fcs_err; rstats->jabber_err = rsp_rstats->jabber_err; rstats->l2_err = rsp_rstats->l2_err; rstats->frame_err = rsp_rstats->frame_err; /* RX firmware stats */ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; rstats->fw_err_pko = rsp_rstats->fw_err_pko; rstats->fw_err_link = rsp_rstats->fw_err_link; rstats->fw_err_drop = rsp_rstats->fw_err_drop; rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; /* Number of packets that are LROed */ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; /* Number of octets that are LROed */ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; /* Number of LRO packets formed */ rstats->fw_total_lro = rsp_rstats->fw_total_lro; /* Number of times LRO of packet aborted */ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; /* intrmod: packet forward rate */ rstats->fwd_rate = rsp_rstats->fwd_rate; /* TX link-level stats */ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; tstats->ctl_sent = rsp_tstats->ctl_sent; /* Packets sent after one collision */ tstats->one_collision_sent = rsp_tstats->one_collision_sent; /* Packets sent after multiple collisions */ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; /* Packets not sent due to max collisions */ tstats->max_collision_fail = rsp_tstats->max_collision_fail; /* Packets not sent due to max deferrals */ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; /* Accounts for over/under-run of buffers */ tstats->fifo_err = rsp_tstats->fifo_err; tstats->runts = rsp_tstats->runts; /* Total
number of collisions detected */ tstats->total_collisions = rsp_tstats->total_collisions; /* firmware stats */ tstats->fw_total_sent = rsp_tstats->fw_total_sent; tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; tstats->fw_err_pko = rsp_tstats->fw_err_pko; tstats->fw_err_pki = rsp_tstats->fw_err_pki; tstats->fw_err_link = rsp_tstats->fw_err_link; tstats->fw_err_drop = rsp_tstats->fw_err_drop; tstats->fw_tso = rsp_tstats->fw_tso; tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; tstats->fw_err_tso = rsp_tstats->fw_err_tso; tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; } lio_free_soft_command(oct_dev, sc); callout_schedule(&lio->stats_timer, lio_ms_to_ticks(lio->stats_interval)); } /* Fetch port statistics from the firmware */ static void lio_get_fw_stats(void *arg) { struct lio *lio = arg; struct octeon_device *oct_dev = lio->oct_dev; struct lio_soft_command *sc; struct lio_fw_stats_resp *resp; int retval; if (callout_pending(&lio->stats_timer) || callout_active(&lio->stats_timer) == 0) return; /* Alloc soft command */ sc = lio_alloc_soft_command(oct_dev, 0, sizeof(struct lio_fw_stats_resp), 0); if (sc == NULL) goto alloc_sc_failed; resp = (struct lio_fw_stats_resp *)sc->virtrptr; bzero(resp, sizeof(struct lio_fw_stats_resp)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_PORT_STATS, 0, 0, 0); sc->callback = lio_fw_stats_callback; sc->callback_arg = sc; sc->wait_time = 500; /* in milliseconds */ retval = lio_send_soft_command(oct_dev, sc); if (retval == LIO_IQ_SEND_FAILED) goto send_sc_failed; return; send_sc_failed: lio_free_soft_command(oct_dev, sc); alloc_sc_failed: callout_schedule(&lio->stats_timer, lio_ms_to_ticks(lio->stats_interval)); } /* Callback function for intrmod */ static void lio_get_intrmod_callback(struct octeon_device *oct_dev, uint32_t status, void *ptr) { struct lio_soft_command *sc = (struct lio_soft_command *)ptr; - struct ifnet *ifp = oct_dev->props.ifp; + if_t ifp = oct_dev->props.ifp; struct lio *lio = if_getsoftc(ifp); struct lio_intrmod_resp *resp; if (status) { lio_dev_err(oct_dev, "Failed to get intrmod\n"); } else { resp = (struct lio_intrmod_resp *)sc->virtrptr; lio_swap_8B_data((uint64_t *)&resp->intrmod, (sizeof(struct octeon_intrmod_cfg)) / 8); memcpy(&lio->intrmod_cfg, &resp->intrmod, sizeof(struct octeon_intrmod_cfg)); } lio_free_soft_command(oct_dev, sc); } /* get interrupt moderation parameters */ static int lio_get_intrmod_cfg(struct lio *lio, struct octeon_intrmod_cfg *intr_cfg) { struct lio_soft_command *sc; struct lio_intrmod_resp *resp; struct octeon_device *oct_dev = lio->oct_dev; int retval; /* Alloc soft command */ sc = lio_alloc_soft_command(oct_dev, 0, sizeof(struct lio_intrmod_resp), 0); if (sc == NULL) return (ENOMEM); resp = (struct lio_intrmod_resp *)sc->virtrptr; bzero(resp, sizeof(struct lio_intrmod_resp)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); sc->callback = lio_get_intrmod_callback; sc->callback_arg = sc; sc->wait_time = 1000; retval = lio_send_soft_command(oct_dev, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_free_soft_command(oct_dev, sc); return (EINVAL); } return (0); } static void lio_set_intrmod_callback(struct octeon_device *oct_dev, uint32_t status, void *ptr) { struct lio_soft_command *sc = (struct lio_soft_command *)ptr; struct lio_intrmod_context *ctx; ctx = (struct lio_intrmod_context *)sc->ctxptr; ctx->status = status; ctx->cond = 1; /* * This barrier is required
to be sure that the response has been * written fully before waking up the handler */ wmb(); } /* Configure interrupt moderation parameters */ static int lio_set_intrmod_cfg(struct lio *lio, struct octeon_intrmod_cfg *intr_cfg) { struct lio_soft_command *sc; struct lio_intrmod_context *ctx; struct octeon_intrmod_cfg *cfg; struct octeon_device *oct_dev = lio->oct_dev; int retval; /* Alloc soft command */ sc = lio_alloc_soft_command(oct_dev, sizeof(struct octeon_intrmod_cfg), 0, sizeof(struct lio_intrmod_context)); if (sc == NULL) return (ENOMEM); ctx = (struct lio_intrmod_context *)sc->ctxptr; ctx->cond = 0; ctx->octeon_id = lio_get_device_id(oct_dev); cfg = (struct octeon_intrmod_cfg *)sc->virtdptr; memcpy(cfg, intr_cfg, sizeof(struct octeon_intrmod_cfg)); lio_swap_8B_data((uint64_t *)cfg, (sizeof(struct octeon_intrmod_cfg)) / 8); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); sc->callback = lio_set_intrmod_callback; sc->callback_arg = sc; sc->wait_time = 1000; retval = lio_send_soft_command(oct_dev, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_free_soft_command(oct_dev, sc); return (EINVAL); } /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ lio_sleep_cond(oct_dev, &ctx->cond); retval = ctx->status; if (retval) lio_dev_err(oct_dev, "intrmod config failed. Status: %llx\n", LIO_CAST64(retval)); else lio_dev_info(oct_dev, "Rx-Adaptive Interrupt moderation enabled:%llx\n", LIO_CAST64(intr_cfg->rx_enable)); lio_free_soft_command(oct_dev, sc); return ((retval) ? ETIMEDOUT : 0); } static int lio_intrmod_cfg_rx_intrcnt(struct lio *lio, struct octeon_intrmod_cfg *intrmod, uint32_t rx_max_frames) { struct octeon_device *oct = lio->oct_dev; uint32_t rx_max_coalesced_frames; /* Config Cnt based interrupt values */ switch (oct->chip_id) { case LIO_CN23XX_PF_VID:{ int q_no; if (!rx_max_frames) rx_max_coalesced_frames = intrmod->rx_frames; else rx_max_coalesced_frames = rx_max_frames; for (q_no = 0; q_no < oct->num_oqs; q_no++) { q_no += oct->sriov_info.pf_srn; lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), (lio_read_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & (0x3fffff00000000UL)) | (rx_max_coalesced_frames - 1)); /* consider setting resend bit */ } intrmod->rx_frames = rx_max_coalesced_frames; oct->rx_max_coalesced_frames = rx_max_coalesced_frames; break; } default: return (EINVAL); } return (0); } static int lio_intrmod_cfg_rx_intrtime(struct lio *lio, struct octeon_intrmod_cfg *intrmod, uint32_t rx_usecs) { struct octeon_device *oct = lio->oct_dev; uint32_t rx_coalesce_usecs; /* Config Time based interrupt values */ switch (oct->chip_id) { case LIO_CN23XX_PF_VID:{ uint64_t time_threshold; int q_no; if (!rx_usecs) rx_coalesce_usecs = intrmod->rx_usecs; else rx_coalesce_usecs = rx_usecs; time_threshold = lio_cn23xx_pf_get_oq_ticks(oct, rx_coalesce_usecs); for (q_no = 0; q_no < oct->num_oqs; q_no++) { q_no += oct->sriov_info.pf_srn; lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), (intrmod->rx_frames | ((uint64_t)time_threshold << 32))); /* consider writing to resend bit here */ } intrmod->rx_usecs = rx_coalesce_usecs; oct->rx_coalesce_usecs = rx_coalesce_usecs; break; } default: return (EINVAL); } return (0); } static int lio_intrmod_cfg_tx_intrcnt(struct lio *lio, struct octeon_intrmod_cfg *intrmod, uint32_t tx_max_frames) { struct octeon_device *oct = lio->oct_dev; uint64_t val; uint32_t iq_intr_pkt; uint32_t inst_cnt_reg; /* 
Config Cnt based interrupt values */ switch (oct->chip_id) { case LIO_CN23XX_PF_VID:{ int q_no; if (!tx_max_frames) iq_intr_pkt = LIO_CN23XX_DEF_IQ_INTR_THRESHOLD & LIO_CN23XX_PKT_IN_DONE_WMARK_MASK; else iq_intr_pkt = tx_max_frames & LIO_CN23XX_PKT_IN_DONE_WMARK_MASK; for (q_no = 0; q_no < oct->num_iqs; q_no++) { inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg; val = lio_read_csr64(oct, inst_cnt_reg); /* * Clear wmark and count; don't want to write the * count back. */ val = (val & 0xFFFF000000000000ULL) | ((uint64_t)(iq_intr_pkt - 1) << LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS); lio_write_csr64(oct, inst_cnt_reg, val); /* consider setting resend bit */ } intrmod->tx_frames = iq_intr_pkt; oct->tx_max_coalesced_frames = iq_intr_pkt; break; } default: return (EINVAL); } return (0); } static int lio_get_set_intr_coalesce(SYSCTL_HANDLER_ARGS) { struct lio *lio = (struct lio *)arg1; struct octeon_device *oct = lio->oct_dev; uint64_t new_val = 0, old_val = 0; uint32_t rx_coalesce_usecs = 0; uint32_t rx_max_coalesced_frames = 0; uint32_t tx_coalesce_usecs = 0; int err, ret; switch (arg2) { case LIO_USE_ADAPTIVE_RX_COALESCE: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_enable; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); lio->intrmod_cfg.rx_enable = new_val ? 1 : 0; break; case LIO_USE_ADAPTIVE_TX_COALESCE: if (lio->intrmod_cfg.tx_enable) new_val = old_val = lio->intrmod_cfg.tx_enable; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); lio->intrmod_cfg.tx_enable = new_val ? 1 : 0; break; case LIO_RX_COALESCE_USECS: if (!lio->intrmod_cfg.rx_enable) new_val = old_val = oct->rx_coalesce_usecs; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); rx_coalesce_usecs = new_val; break; case LIO_RX_MAX_COALESCED_FRAMES: if (!lio->intrmod_cfg.rx_enable) new_val = old_val = oct->rx_max_coalesced_frames; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); rx_max_coalesced_frames = new_val; break; case LIO_TX_MAX_COALESCED_FRAMES: if (!lio->intrmod_cfg.tx_enable) new_val = old_val = oct->tx_max_coalesced_frames; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); tx_coalesce_usecs = new_val; break; case LIO_PKT_RATE_LOW: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.minpkt_ratethr; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable || lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.minpkt_ratethr = new_val; break; case LIO_RX_COALESCE_USECS_LOW: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_mintmr_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable) lio->intrmod_cfg.rx_mintmr_trigger = new_val; break; case LIO_RX_MAX_COALESCED_FRAMES_LOW: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_mincnt_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable)
lio->intrmod_cfg.rx_mincnt_trigger = new_val; break; case LIO_TX_MAX_COALESCED_FRAMES_LOW: if (lio->intrmod_cfg.tx_enable) new_val = old_val = lio->intrmod_cfg.tx_mincnt_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.tx_mincnt_trigger = new_val; break; case LIO_PKT_RATE_HIGH: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.maxpkt_ratethr; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable || lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.maxpkt_ratethr = new_val; break; case LIO_RX_COALESCE_USECS_HIGH: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_maxtmr_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable) lio->intrmod_cfg.rx_maxtmr_trigger = new_val; break; case LIO_RX_MAX_COALESCED_FRAMES_HIGH: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.rx_maxcnt_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable) lio->intrmod_cfg.rx_maxcnt_trigger = new_val; break; case LIO_TX_MAX_COALESCED_FRAMES_HIGH: if (lio->intrmod_cfg.tx_enable) new_val = old_val = lio->intrmod_cfg.tx_maxcnt_trigger; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.tx_maxcnt_trigger = new_val; break; case LIO_RATE_SAMPLE_INTERVAL: if (lio->intrmod_cfg.rx_enable) new_val = old_val = lio->intrmod_cfg.check_intrvl; err = sysctl_handle_64(oidp, &new_val, 0, req); if ((err) || (req->newptr == NULL)) return (err); if (old_val == new_val) return (0); if (lio->intrmod_cfg.rx_enable || lio->intrmod_cfg.tx_enable) lio->intrmod_cfg.check_intrvl = new_val; break; default: return (EINVAL); } lio->intrmod_cfg.rx_usecs = LIO_GET_OQ_INTR_TIME_CFG(lio_get_conf(oct)); lio->intrmod_cfg.rx_frames = LIO_GET_OQ_INTR_PKT_CFG(lio_get_conf(oct)); lio->intrmod_cfg.tx_frames = LIO_GET_IQ_INTR_PKT_CFG(lio_get_conf(oct)); ret = lio_set_intrmod_cfg(lio, &lio->intrmod_cfg); if (ret) lio_dev_err(oct, "Interrupt coalescing update to firmware failed!\n"); if (!lio->intrmod_cfg.rx_enable) { if (!rx_coalesce_usecs) rx_coalesce_usecs = oct->rx_coalesce_usecs; if (!rx_max_coalesced_frames) rx_max_coalesced_frames = oct->rx_max_coalesced_frames; ret = lio_intrmod_cfg_rx_intrtime(lio, &lio->intrmod_cfg, rx_coalesce_usecs); if (ret) return (ret); ret = lio_intrmod_cfg_rx_intrcnt(lio, &lio->intrmod_cfg, rx_max_coalesced_frames); if (ret) return (ret); } else { oct->rx_coalesce_usecs = LIO_GET_OQ_INTR_TIME_CFG(lio_get_conf(oct)); oct->rx_max_coalesced_frames = LIO_GET_OQ_INTR_PKT_CFG(lio_get_conf(oct)); } if (!lio->intrmod_cfg.tx_enable) { if (!tx_coalesce_usecs) tx_coalesce_usecs = oct->tx_max_coalesced_frames; ret = lio_intrmod_cfg_tx_intrcnt(lio, &lio->intrmod_cfg, tx_coalesce_usecs); if (ret) return (ret); } else { oct->tx_max_coalesced_frames = LIO_GET_IQ_INTR_PKT_CFG(lio_get_conf(oct)); } return (0); }
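/*
 * Illustrative tuning of the "coalesce" nodes registered in
 * lio_add_hw_stats(); the dev.lio.0 prefix is an assumed example:
 *
 *	sysctl dev.lio.0.coalesce.rx-usecs=64	- fixed time-based RX coalescing
 *	sysctl dev.lio.0.coalesce.rx-frames=16	- fixed count-based RX coalescing
 *	sysctl dev.lio.0.coalesce.adaptive-rx=1	- firmware-managed moderation
 *
 * With adaptive-rx set, lio_get_set_intr_coalesce() pushes the whole
 * octeon_intrmod_cfg to the firmware through lio_set_intrmod_cfg(); with
 * it clear, the fixed thresholds are written straight to the CN23XX CSRs
 * by lio_intrmod_cfg_rx_intrtime() and lio_intrmod_cfg_rx_intrcnt().
 */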