diff --git a/sys/dev/vnic/nic.h b/sys/dev/vnic/nic.h index dba91af666d6..2dcaffe1628c 100644 --- a/sys/dev/vnic/nic.h +++ b/sys/dev/vnic/nic.h @@ -1,519 +1,519 @@ /* * Copyright (C) 2015 Cavium Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef NIC_H #define NIC_H /* PCI vendor ID */ #define PCI_VENDOR_ID_CAVIUM 0x177D /* PCI device IDs */ #define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E #define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011 #define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034 #define PCI_DEVICE_ID_THUNDER_BGX 0xA026 /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 0 #define PCI_MSIX_REG_BAR_NUM 4 /* PCI revision IDs */ #define PCI_REVID_PASS2 8 /* NIC SRIOV VF count */ #define MAX_NUM_VFS_SUPPORTED 128 #define DEFAULT_NUM_VF_ENABLED 8 #define NIC_TNS_BYPASS_MODE 0 #define NIC_TNS_MODE 1 /* NIC priv flags */ #define NIC_SRIOV_ENABLED (1 << 0) #define NIC_TNS_ENABLED (1 << 1) /* ARM64TODO */ #if 0 /* VNIC HW optimiation features */ #define VNIC_RSS_SUPPORT #define VNIC_MULTI_QSET_SUPPORT #endif /* Min/Max packet size */ #define NIC_HW_MIN_FRS 64 #define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ /* Max pkinds */ #define NIC_MAX_PKIND 16 /* * Rx Channels */ /* Receive channel configuration in TNS bypass mode * Below is configuration in TNS bypass mode * BGX0-LMAC0-CHAN0 - VNIC CHAN0 * BGX0-LMAC1-CHAN0 - VNIC CHAN16 * ... * BGX1-LMAC0-CHAN0 - VNIC CHAN128 * ... 
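 * (General mapping, assuming the 16-channel-per-LMAC stride implied by the
 *  CHAN0/CHAN16 entries above: VNIC channel = bgx * NIC_CHANS_PER_INF +
 *  lmac * 16.)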
* BGX1-LMAC3-CHAN0 - VNIC CHAN174 */ #define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */ #define NIC_CHANS_PER_INF 128 #define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF) #define NIC_CPI_COUNT 2048 /* No of channel parse indices */ /* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */ #define NIC_MAX_BGX MAX_BGX_PER_CN88XX #define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX) #define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */ #define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX) /* Tx scheduling */ #define NIC_MAX_TL4 1024 #define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */ #define NIC_MAX_TL3 256 #define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */ #define NIC_MAX_TL2 64 #define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */ #define NIC_MAX_TL1 2 /* TNS bypass mode */ #define NIC_TL2_PER_BGX 32 #define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX) #define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF) /* NIC VF Interrupts */ #define NICVF_INTR_CQ 0 #define NICVF_INTR_SQ 1 #define NICVF_INTR_RBDR 2 #define NICVF_INTR_PKT_DROP 3 #define NICVF_INTR_TCP_TIMER 4 #define NICVF_INTR_MBOX 5 #define NICVF_INTR_QS_ERR 6 #define NICVF_INTR_CQ_SHIFT 0 #define NICVF_INTR_SQ_SHIFT 8 #define NICVF_INTR_RBDR_SHIFT 16 #define NICVF_INTR_PKT_DROP_SHIFT 20 #define NICVF_INTR_TCP_TIMER_SHIFT 21 #define NICVF_INTR_MBOX_SHIFT 22 #define NICVF_INTR_QS_ERR_SHIFT 23 #define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT) #define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT) #define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT) #define NICVF_INTR_PKT_DROP_MASK (1 << NICVF_INTR_PKT_DROP_SHIFT) #define NICVF_INTR_TCP_TIMER_MASK (1 << NICVF_INTR_TCP_TIMER_SHIFT) #define NICVF_INTR_MBOX_MASK (1 << NICVF_INTR_MBOX_SHIFT) #define NICVF_INTR_QS_ERR_MASK (1 << NICVF_INTR_QS_ERR_SHIFT) /* MSI-X interrupts */ #define NIC_PF_MSIX_VECTORS 10 #define NIC_VF_MSIX_VECTORS 20 #define NIC_PF_INTR_ID_ECC0_SBE 0 #define NIC_PF_INTR_ID_ECC0_DBE 1 #define NIC_PF_INTR_ID_ECC1_SBE 2 #define NIC_PF_INTR_ID_ECC1_DBE 3 #define NIC_PF_INTR_ID_ECC2_SBE 4 #define NIC_PF_INTR_ID_ECC2_DBE 5 #define NIC_PF_INTR_ID_ECC3_SBE 6 #define NIC_PF_INTR_ID_ECC3_DBE 7 #define NIC_PF_INTR_ID_MBOX0 8 #define NIC_PF_INTR_ID_MBOX1 9 struct msix_entry { struct resource * irq_res; void * handle; }; /* * Global timer for CQ timer thresh interrupts * Calculated for SCLK of 700Mhz * value written should be a 1/16th of what is expected * * 1 tick per 0.05usec = value of 2.2 * This 10% would be covered in CQ timer thresh value */ #define NICPF_CLK_PER_INT_TICK 2 /* * Time to wait before we decide that a SQ is stuck. * * Since both pkt rx and tx notifications are done with same CQ, * when packets are being received at very high rate (eg: L2 forwarding) * then freeing transmitted skbs will be delayed and watchdog * will kick in, resetting interface. Hence keeping this value high. 
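 *
 * NICVF_TX_TIMEOUT below is expressed in ticks, so (50 * HZ) corresponds to
 * a 50 second watchdog interval regardless of the configured kernel HZ.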
*/ #define NICVF_TX_TIMEOUT (50 * HZ) #define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */ #define NIC_MAX_RSS_HASH_BITS 8 #define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS) #define RSS_HASH_KEY_SIZE 5 /* 320 bit key */ struct nicvf_rss_info { boolean_t enable; #define RSS_L2_EXTENDED_HASH_ENA (1UL << 0) #define RSS_IP_HASH_ENA (1UL << 1) #define RSS_TCP_HASH_ENA (1UL << 2) #define RSS_TCP_SYN_DIS (1UL << 3) #define RSS_UDP_HASH_ENA (1UL << 4) #define RSS_L4_EXTENDED_HASH_ENA (1UL << 5) #define RSS_ROCE_ENA (1UL << 6) #define RSS_L3_BI_DIRECTION_ENA (1UL << 7) #define RSS_L4_BI_DIRECTION_ENA (1UL << 8) uint64_t cfg; uint8_t hash_bits; uint16_t rss_size; uint8_t ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; uint64_t key[RSS_HASH_KEY_SIZE]; }; enum rx_stats_reg_offset { RX_OCTS = 0x0, RX_UCAST = 0x1, RX_BCAST = 0x2, RX_MCAST = 0x3, RX_RED = 0x4, RX_RED_OCTS = 0x5, RX_ORUN = 0x6, RX_ORUN_OCTS = 0x7, RX_FCS = 0x8, RX_L2ERR = 0x9, RX_DRP_BCAST = 0xa, RX_DRP_MCAST = 0xb, RX_DRP_L3BCAST = 0xc, RX_DRP_L3MCAST = 0xd, RX_STATS_ENUM_LAST, }; enum tx_stats_reg_offset { TX_OCTS = 0x0, TX_UCAST = 0x1, TX_BCAST = 0x2, TX_MCAST = 0x3, TX_DROP = 0x4, TX_STATS_ENUM_LAST, }; struct nicvf_hw_stats { uint64_t rx_bytes; uint64_t rx_ucast_frames; uint64_t rx_bcast_frames; uint64_t rx_mcast_frames; uint64_t rx_fcs_errors; uint64_t rx_l2_errors; uint64_t rx_drop_red; uint64_t rx_drop_red_bytes; uint64_t rx_drop_overrun; uint64_t rx_drop_overrun_bytes; uint64_t rx_drop_bcast; uint64_t rx_drop_mcast; uint64_t rx_drop_l3_bcast; uint64_t rx_drop_l3_mcast; uint64_t rx_bgx_truncated_pkts; uint64_t rx_jabber_errs; uint64_t rx_fcs_errs; uint64_t rx_bgx_errs; uint64_t rx_prel2_errs; uint64_t rx_l2_hdr_malformed; uint64_t rx_oversize; uint64_t rx_undersize; uint64_t rx_l2_len_mismatch; uint64_t rx_l2_pclp; uint64_t rx_ip_ver_errs; uint64_t rx_ip_csum_errs; uint64_t rx_ip_hdr_malformed; uint64_t rx_ip_payload_malformed; uint64_t rx_ip_ttl_errs; uint64_t rx_l3_pclp; uint64_t rx_l4_malformed; uint64_t rx_l4_csum_errs; uint64_t rx_udp_len_errs; uint64_t rx_l4_port_errs; uint64_t rx_tcp_flag_errs; uint64_t rx_tcp_offset_errs; uint64_t rx_l4_pclp; uint64_t rx_truncated_pkts; uint64_t tx_bytes_ok; uint64_t tx_ucast_frames_ok; uint64_t tx_bcast_frames_ok; uint64_t tx_mcast_frames_ok; uint64_t tx_drops; }; struct nicvf_drv_stats { /* Rx */ uint64_t rx_frames_ok; uint64_t rx_frames_64; uint64_t rx_frames_127; uint64_t rx_frames_255; uint64_t rx_frames_511; uint64_t rx_frames_1023; uint64_t rx_frames_1518; uint64_t rx_frames_jumbo; uint64_t rx_drops; /* Tx */ uint64_t tx_frames_ok; uint64_t tx_drops; uint64_t tx_tso; uint64_t txq_stop; uint64_t txq_wake; }; struct nicvf { struct nicvf *pnicvf; device_t dev; - struct ifnet * ifp; + if_t ifp; struct sx core_sx; struct ifmedia if_media; uint32_t if_flags; uint8_t hwaddr[ETHER_ADDR_LEN]; uint8_t vf_id; uint8_t node; boolean_t tns_mode:1; boolean_t sqs_mode:1; bool loopback_supported:1; struct nicvf_rss_info rss_info; uint16_t mtu; struct queue_set *qs; uint8_t rx_queues; uint8_t tx_queues; uint8_t max_queues; struct resource *reg_base; boolean_t link_up; boolean_t hw_tso; uint8_t duplex; uint32_t speed; uint8_t cpi_alg; /* Interrupt coalescing settings */ uint32_t cq_coalesce_usecs; uint32_t msg_enable; struct nicvf_hw_stats hw_stats; struct nicvf_drv_stats drv_stats; struct bgx_stats bgx_stats; /* Interface statistics */ struct callout stats_callout; struct mtx stats_mtx; /* MSI-X */ boolean_t msix_enabled; uint8_t num_vec; struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS]; struct 
resource * msix_table_res; char irq_name[NIC_VF_MSIX_VECTORS][20]; boolean_t irq_allocated[NIC_VF_MSIX_VECTORS]; /* VF <-> PF mailbox communication */ boolean_t pf_acked; boolean_t pf_nacked; } __aligned(CACHE_LINE_SIZE); /* * PF <--> VF Mailbox communication * Eight 64bit registers are shared between PF and VF. * Separate set for each VF. * Writing '1' into last register mbx7 means end of message. */ /* PF <--> VF mailbox communication */ #define NIC_PF_VF_MAILBOX_SIZE 2 #define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */ /* Mailbox message types */ #define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */ #define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */ #define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */ #define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */ #define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */ #define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */ #define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */ #define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */ #define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */ #define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */ #define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */ #define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */ #define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */ #define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */ #define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */ #define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */ #define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */ #define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */ #define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */ #define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */ #define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */ #define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */ #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ struct nic_cfg_msg { uint8_t msg; uint8_t vf_id; uint8_t node_id; boolean_t tns_mode:1; boolean_t sqs_mode:1; boolean_t loopback_supported:1; uint8_t mac_addr[ETHER_ADDR_LEN]; }; /* Qset configuration */ struct qs_cfg_msg { uint8_t msg; uint8_t num; uint8_t sqs_count; uint64_t cfg; }; /* Receive queue configuration */ struct rq_cfg_msg { uint8_t msg; uint8_t qs_num; uint8_t rq_num; uint64_t cfg; }; /* Send queue configuration */ struct sq_cfg_msg { uint8_t msg; uint8_t qs_num; uint8_t sq_num; boolean_t sqs_mode; uint64_t cfg; }; /* Set VF's MAC address */ struct set_mac_msg { uint8_t msg; uint8_t vf_id; uint8_t mac_addr[ETHER_ADDR_LEN]; }; /* Set Maximum frame size */ struct set_frs_msg { uint8_t msg; uint8_t vf_id; uint16_t max_frs; }; /* Set CPI algorithm type */ struct cpi_cfg_msg { uint8_t msg; uint8_t vf_id; uint8_t rq_cnt; uint8_t cpi_alg; }; /* Get RSS table size */ struct rss_sz_msg { uint8_t msg; uint8_t vf_id; uint16_t ind_tbl_size; }; /* Set RSS configuration */ struct rss_cfg_msg { uint8_t msg; uint8_t vf_id; uint8_t hash_bits; uint8_t tbl_len; uint8_t tbl_offset; #define RSS_IND_TBL_LEN_PER_MBX_MSG 8 uint8_t ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG]; }; struct bgx_stats_msg { uint8_t msg; uint8_t vf_id; uint8_t rx; uint8_t idx; uint64_t stats; }; /* Physical interface link status */ struct bgx_link_status { uint8_t msg; uint8_t link_up; uint8_t duplex; uint32_t speed; }; /* Set interface in loopback mode */ struct set_loopback { uint8_t msg; 
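/*
 * Illustrative sketch (not code taken from this driver) of how a VF builds
 * and sends one of these mailbox requests, following the same pattern as
 * nicvf_hw_set_mac_addr() in nicvf_main.c:
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
 *	mbx.lbk.vf_id = nic->vf_id;
 *	mbx.lbk.enable = TRUE;
 *	error = nicvf_send_msg_to_pf(nic, &mbx);
 */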
uint8_t vf_id; boolean_t enable; }; /* 128 bit shared memory between PF and each VF */ union nic_mbx { struct { uint8_t msg; } msg; struct nic_cfg_msg nic_cfg; struct qs_cfg_msg qs; struct rq_cfg_msg rq; struct sq_cfg_msg sq; struct set_mac_msg mac; struct set_frs_msg frs; struct cpi_cfg_msg cpi_cfg; struct rss_sz_msg rss_size; struct rss_cfg_msg rss_cfg; struct bgx_stats_msg bgx_stats; struct bgx_link_status link_status; struct set_loopback lbk; }; #define NIC_NODE_ID_MASK 0x03 #define NIC_NODE_ID_SHIFT 44 static __inline int nic_get_node_id(struct resource *res) { pci_addr_t addr; addr = rman_get_start(res); return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK); } static __inline boolean_t pass1_silicon(device_t dev) { /* Check if the chip revision is < Pass2 */ return (pci_get_revid(dev) < PCI_REVID_PASS2); } int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx); #endif /* NIC_H */ diff --git a/sys/dev/vnic/nicvf_main.c b/sys/dev/vnic/nicvf_main.c index 54efc58fee22..b135c745b902 100644 --- a/sys/dev/vnic/nicvf_main.c +++ b/sys/dev/vnic/nicvf_main.c @@ -1,1620 +1,1620 @@ /* * Copyright (C) 2015 Cavium Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ * */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_bgx.h" #include "nic_reg.h" #include "nic.h" #include "nicvf_queues.h" #define VNIC_VF_DEVSTR "Cavium Thunder NIC Virtual Function Driver" #define VNIC_VF_REG_RID PCIR_BAR(PCI_CFG_REG_BAR_NUM) /* Lock for core interface settings */ #define NICVF_CORE_LOCK_INIT(nic) \ sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev)) #define NICVF_CORE_LOCK_DESTROY(nic) \ sx_destroy(&(nic)->core_sx) #define NICVF_CORE_LOCK(nic) sx_xlock(&(nic)->core_sx) #define NICVF_CORE_UNLOCK(nic) sx_xunlock(&(nic)->core_sx) #define NICVF_CORE_LOCK_ASSERT(nic) sx_assert(&(nic)->core_sx, SA_XLOCKED) #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 #define SPEED_10000 10000 #define SPEED_40000 40000 MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory"); static int nicvf_probe(device_t); static int nicvf_attach(device_t); static int nicvf_detach(device_t); static device_method_t nicvf_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nicvf_probe), DEVMETHOD(device_attach, nicvf_attach), DEVMETHOD(device_detach, nicvf_detach), DEVMETHOD_END, }; static driver_t nicvf_driver = { "vnic", nicvf_methods, sizeof(struct nicvf), }; DRIVER_MODULE(vnicvf, pci, nicvf_driver, 0, 0); MODULE_VERSION(vnicvf, 1); MODULE_DEPEND(vnicvf, pci, 1, 1, 1); MODULE_DEPEND(vnicvf, ether, 1, 1, 1); MODULE_DEPEND(vnicvf, vnicpf, 1, 1, 1); static int nicvf_allocate_misc_interrupt(struct nicvf *); static int nicvf_enable_misc_interrupt(struct nicvf *); static int nicvf_allocate_net_interrupts(struct nicvf *); static void nicvf_release_all_interrupts(struct nicvf *); static int nicvf_update_hw_max_frs(struct nicvf *, int); static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *); static void nicvf_config_cpi(struct nicvf *); static int nicvf_rss_init(struct nicvf *); static int nicvf_init_resources(struct nicvf *); static int nicvf_setup_ifnet(struct nicvf *); static int nicvf_setup_ifmedia(struct nicvf *); static void nicvf_hw_addr_random(uint8_t *); -static int nicvf_if_ioctl(struct ifnet *, u_long, caddr_t); +static int nicvf_if_ioctl(if_t, u_long, caddr_t); static void nicvf_if_init(void *); static void nicvf_if_init_locked(struct nicvf *); -static int nicvf_if_transmit(struct ifnet *, struct mbuf *); -static void nicvf_if_qflush(struct ifnet *); -static uint64_t nicvf_if_getcounter(struct ifnet *, ift_counter); +static int nicvf_if_transmit(if_t, struct mbuf *); +static void nicvf_if_qflush(if_t); +static uint64_t nicvf_if_getcounter(if_t, ift_counter); static int nicvf_stop_locked(struct nicvf *); -static void nicvf_media_status(struct ifnet *, struct ifmediareq *); -static int nicvf_media_change(struct ifnet *); +static void nicvf_media_status(if_t, struct ifmediareq *); +static int nicvf_media_change(if_t); static void nicvf_tick_stats(void *); static int nicvf_probe(device_t dev) { uint16_t vendor_id; uint16_t device_id; vendor_id = pci_get_vendor(dev); device_id = pci_get_device(dev); if (vendor_id != PCI_VENDOR_ID_CAVIUM) return (ENXIO); if (device_id == PCI_DEVICE_ID_THUNDER_NIC_VF || device_id == 
PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF) { device_set_desc(dev, VNIC_VF_DEVSTR); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int nicvf_attach(device_t dev) { struct nicvf *nic; int rid, qcount; int err = 0; uint8_t hwaddr[ETHER_ADDR_LEN]; uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0}; nic = device_get_softc(dev); nic->dev = dev; nic->pnicvf = nic; NICVF_CORE_LOCK_INIT(nic); /* Enable HW TSO on Pass2 */ if (!pass1_silicon(dev)) nic->hw_tso = TRUE; rid = VNIC_VF_REG_RID; nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (nic->reg_base == NULL) { device_printf(dev, "Could not allocate registers memory\n"); return (ENXIO); } qcount = MAX_CMP_QUEUES_PER_QS; nic->max_queues = qcount; err = nicvf_set_qset_resources(nic); if (err != 0) goto err_free_res; /* Check if PF is alive and get MAC address for this VF */ err = nicvf_allocate_misc_interrupt(nic); if (err != 0) goto err_free_res; NICVF_CORE_LOCK(nic); err = nicvf_enable_misc_interrupt(nic); NICVF_CORE_UNLOCK(nic); if (err != 0) goto err_release_intr; err = nicvf_allocate_net_interrupts(nic); if (err != 0) { device_printf(dev, "Could not allocate network interface interrupts\n"); goto err_free_ifnet; } /* If no MAC address was obtained we generate random one */ if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) { nicvf_hw_addr_random(hwaddr); memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN); NICVF_CORE_LOCK(nic); nicvf_hw_set_mac_addr(nic, hwaddr); NICVF_CORE_UNLOCK(nic); } /* Configure CPI alorithm */ nic->cpi_alg = CPI_ALG_NONE; NICVF_CORE_LOCK(nic); nicvf_config_cpi(nic); /* Configure receive side scaling */ if (nic->qs->rq_cnt > 1) nicvf_rss_init(nic); NICVF_CORE_UNLOCK(nic); err = nicvf_setup_ifnet(nic); if (err != 0) { device_printf(dev, "Could not set-up ifnet\n"); goto err_release_intr; } err = nicvf_setup_ifmedia(nic); if (err != 0) { device_printf(dev, "Could not set-up ifmedia\n"); goto err_free_ifnet; } mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF); callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0); ether_ifattach(nic->ifp, nic->hwaddr); return (0); err_free_ifnet: if_free(nic->ifp); err_release_intr: nicvf_release_all_interrupts(nic); err_free_res: bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base), nic->reg_base); return (err); } static int nicvf_detach(device_t dev) { struct nicvf *nic; nic = device_get_softc(dev); NICVF_CORE_LOCK(nic); /* Shut down the port and release ring resources */ nicvf_stop_locked(nic); /* Release stats lock */ mtx_destroy(&nic->stats_mtx); /* Release interrupts */ nicvf_release_all_interrupts(nic); /* Release memory resource */ if (nic->reg_base != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base), nic->reg_base); } /* Remove all ifmedia configurations */ ifmedia_removeall(&nic->if_media); /* Free this ifnet */ if_free(nic->ifp); NICVF_CORE_UNLOCK(nic); /* Finally destroy the lock */ NICVF_CORE_LOCK_DESTROY(nic); return (0); } static void nicvf_hw_addr_random(uint8_t *hwaddr) { uint32_t rnd; uint8_t addr[ETHER_ADDR_LEN]; /* * Create randomized MAC address. * Set 'bsd' + random 24 low-order bits. 
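 * As a side effect, the leading 'b' (0x62) has the locally-administered bit
 * (0x02) set and the multicast bit (0x01) clear, so the generated address is
 * a valid locally administered unicast MAC.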
*/ rnd = arc4random() & 0x00ffffff; addr[0] = 'b'; addr[1] = 's'; addr[2] = 'd'; addr[3] = rnd >> 16; addr[4] = rnd >> 8; addr[5] = rnd >> 0; memcpy(hwaddr, addr, ETHER_ADDR_LEN); } static int nicvf_setup_ifnet(struct nicvf *nic) { - struct ifnet *ifp; + if_t ifp; ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(nic->dev, "Could not allocate ifnet structure\n"); return (ENOMEM); } nic->ifp = ifp; if_setsoftc(ifp, nic); if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_settransmitfn(ifp, nicvf_if_transmit); if_setqflushfn(ifp, nicvf_if_qflush); if_setioctlfn(ifp, nicvf_if_ioctl); if_setinitfn(ifp, nicvf_if_init); if_setgetcounterfn(ifp, nicvf_if_getcounter); if_setmtu(ifp, ETHERMTU); /* Reset caps */ if_setcapabilities(ifp, 0); /* Set the default values */ if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0); if_setcapabilitiesbit(ifp, IFCAP_LRO, 0); if (nic->hw_tso) { /* TSO */ if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); /* TSO parameters */ if_sethwtsomax(ifp, NICVF_TSO_MAXSIZE); if_sethwtsomaxsegcount(ifp, NICVF_TSO_NSEGS); if_sethwtsomaxsegsize(ifp, MCLBYTES); } /* IP/TCP/UDP HW checksums */ if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0); if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0); /* * HW offload enable */ if_clearhwassist(ifp); if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP), 0); if (nic->hw_tso) if_sethwassistbits(ifp, (CSUM_TSO), 0); if_setcapenable(ifp, if_getcapabilities(ifp)); return (0); } static int nicvf_setup_ifmedia(struct nicvf *nic) { ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change, nicvf_media_status); /* * Advertise availability of all possible connection types, * even though not all are possible at the same time. 
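 *
 * The entry reported as active is picked in nicvf_media_status() from the
 * speed/duplex the PF delivers in the BGX link-change mailbox message
 * (SPEED_10 .. SPEED_40000 map onto IFM_10_T .. IFM_40G_CR4 below).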
*/ ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX), 0, NULL); ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX), 0, NULL); ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX), 0, NULL); ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX), 0, NULL); ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX), 0, NULL); ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX), 0, NULL); ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX)); return (0); } static int -nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +nicvf_if_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct nicvf *nic; struct rcv_queue *rq; struct ifreq *ifr; uint32_t flags; int mask, err; int rq_idx; #if defined(INET) || defined(INET6) struct ifaddr *ifa; boolean_t avoid_reset = FALSE; #endif nic = if_getsoftc(ifp); ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) ifa = (struct ifaddr *)data; #endif err = 0; switch (cmd) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif #if defined(INET) || defined(INET6) /* Avoid reinitialization unless it's necessary */ if (avoid_reset) { if_setflagbits(ifp, IFF_UP, 0); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) nicvf_if_init(nic); #ifdef INET if (!(if_getflags(ifp) & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif return (0); } #endif err = ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: if (ifr->ifr_mtu < NIC_HW_MIN_FRS || ifr->ifr_mtu > NIC_HW_MAX_FRS) { err = EINVAL; } else { NICVF_CORE_LOCK(nic); err = nicvf_update_hw_max_frs(nic, ifr->ifr_mtu); if (err == 0) if_setmtu(ifp, ifr->ifr_mtu); NICVF_CORE_UNLOCK(nic); } break; case SIOCSIFFLAGS: NICVF_CORE_LOCK(nic); flags = if_getflags(ifp); if (flags & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((flags ^ nic->if_flags) & IFF_PROMISC) { /* Change promiscous mode */ #if 0 /* XXX */ nicvf_set_promiscous(nic); #endif } if ((flags ^ nic->if_flags) & IFF_ALLMULTI) { /* Change multicasting settings */ #if 0 /* XXX */ nicvf_set_multicast(nic); #endif } } else { nicvf_if_init_locked(nic); } } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) nicvf_stop_locked(nic); nic->if_flags = flags; NICVF_CORE_UNLOCK(nic); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { #if 0 NICVF_CORE_LOCK(nic); /* ARM64TODO */ nicvf_set_multicast(nic); NICVF_CORE_UNLOCK(nic); #endif } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd); break; case SIOCSIFCAP: mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if (mask & IFCAP_VLAN_MTU) { /* No work to do except acknowledge the change took. */ if_togglecapenable(ifp, IFCAP_VLAN_MTU); } if (mask & IFCAP_TXCSUM) if_togglecapenable(ifp, IFCAP_TXCSUM); if (mask & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); if ((mask & IFCAP_TSO4) && nic->hw_tso) if_togglecapenable(ifp, IFCAP_TSO4); if (mask & IFCAP_LRO) { /* * Lock the driver for a moment to avoid * mismatch in per-queue settings. */ NICVF_CORE_LOCK(nic); if_togglecapenable(ifp, IFCAP_LRO); if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) { /* * Now disable LRO for subsequent packets. * Atomicity of this change is not necessary * as we don't need precise toggle of this * feature for all threads processing the * completion queue. 
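 *
 * (This is the runtime LRO toggle path, reached e.g. through
 * "ifconfig vnic0 -lro"; each receive queue's lro_enabled flag is simply
 * flipped in place.)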
*/ for (rq_idx = 0; rq_idx < nic->qs->rq_cnt; rq_idx++) { rq = &nic->qs->rq[rq_idx]; rq->lro_enabled = !rq->lro_enabled; } } NICVF_CORE_UNLOCK(nic); } break; default: err = ether_ioctl(ifp, cmd, data); break; } return (err); } static void nicvf_if_init_locked(struct nicvf *nic) { struct queue_set *qs = nic->qs; - struct ifnet *ifp; + if_t ifp; int qidx; int err; caddr_t if_addr; NICVF_CORE_LOCK_ASSERT(nic); ifp = nic->ifp; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) nicvf_stop_locked(nic); err = nicvf_enable_misc_interrupt(nic); if (err != 0) { if_printf(ifp, "Could not reenable Mbox interrupt\n"); return; } /* Get the latest MAC address */ if_addr = if_getlladdr(ifp); /* Update MAC address if changed */ if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) { memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN); nicvf_hw_set_mac_addr(nic, if_addr); } /* Initialize the queues */ err = nicvf_init_resources(nic); if (err != 0) goto error; /* Make sure queue initialization is written */ wmb(); nicvf_reg_write(nic, NIC_VF_INT, ~0UL); /* Enable Qset err interrupt */ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); /* Enable completion queue interrupt */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); /* Enable RBDR threshold interrupt */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); nic->drv_stats.txq_stop = 0; nic->drv_stats.txq_wake = 0; /* Activate network interface */ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); /* Schedule callout to update stats */ callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic); return; error: /* Something went very wrong. Disable this ifnet for good */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); } static void nicvf_if_init(void *if_softc) { struct nicvf *nic = if_softc; NICVF_CORE_LOCK(nic); nicvf_if_init_locked(nic); NICVF_CORE_UNLOCK(nic); } static int -nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf) +nicvf_if_transmit(if_t ifp, struct mbuf *mbuf) { struct nicvf *nic = if_getsoftc(ifp); struct queue_set *qs = nic->qs; struct snd_queue *sq; struct mbuf *mtmp; int qidx; int err = 0; if (__predict_false(qs == NULL)) { panic("%s: missing queue set for %s", __func__, device_get_nameunit(nic->dev)); } /* Select queue */ if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE) qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt; else qidx = curcpu % qs->sq_cnt; sq = &qs->sq[qidx]; if (mbuf->m_next != NULL && (mbuf->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) { if (M_WRITABLE(mbuf) == 0) { mtmp = m_dup(mbuf, M_NOWAIT); m_freem(mbuf); if (mtmp == NULL) return (ENOBUFS); mbuf = mtmp; } } err = drbr_enqueue(ifp, sq->br, mbuf); if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) || !nic->link_up || (err != 0)) { /* * Try to enqueue packet to the ring buffer. * If the driver is not active, link down or enqueue operation * failed, return with the appropriate error code. 
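 *
 * Otherwise the send queue is drained inline when the TX lock can be taken
 * without contention; if another thread already holds it, the deferred send
 * task is queued on the SQ's taskqueue instead.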
*/ return (err); } if (NICVF_TX_TRYLOCK(sq) != 0) { err = nicvf_xmit_locked(sq); NICVF_TX_UNLOCK(sq); return (err); } else taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); return (0); } static void -nicvf_if_qflush(struct ifnet *ifp) +nicvf_if_qflush(if_t ifp) { struct nicvf *nic; struct queue_set *qs; struct snd_queue *sq; struct mbuf *mbuf; size_t idx; nic = if_getsoftc(ifp); qs = nic->qs; for (idx = 0; idx < qs->sq_cnt; idx++) { sq = &qs->sq[idx]; NICVF_TX_LOCK(sq); while ((mbuf = buf_ring_dequeue_sc(sq->br)) != NULL) m_freem(mbuf); NICVF_TX_UNLOCK(sq); } if_qflush(ifp); } static uint64_t -nicvf_if_getcounter(struct ifnet *ifp, ift_counter cnt) +nicvf_if_getcounter(if_t ifp, ift_counter cnt) { struct nicvf *nic; struct nicvf_hw_stats *hw_stats; struct nicvf_drv_stats *drv_stats; nic = if_getsoftc(ifp); hw_stats = &nic->hw_stats; drv_stats = &nic->drv_stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (drv_stats->rx_frames_ok); case IFCOUNTER_OPACKETS: return (drv_stats->tx_frames_ok); case IFCOUNTER_IBYTES: return (hw_stats->rx_bytes); case IFCOUNTER_OBYTES: return (hw_stats->tx_bytes_ok); case IFCOUNTER_IMCASTS: return (hw_stats->rx_mcast_frames); case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IQDROPS: return (drv_stats->rx_drops); case IFCOUNTER_OQDROPS: return (drv_stats->tx_drops); default: return (if_get_counter_default(ifp, cnt)); } } static void -nicvf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) +nicvf_media_status(if_t ifp, struct ifmediareq *ifmr) { struct nicvf *nic = if_getsoftc(ifp); NICVF_CORE_LOCK(nic); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (nic->link_up) { /* Device attached to working network */ ifmr->ifm_status |= IFM_ACTIVE; } switch (nic->speed) { case SPEED_10: ifmr->ifm_active |= IFM_10_T; break; case SPEED_100: ifmr->ifm_active |= IFM_100_TX; break; case SPEED_1000: ifmr->ifm_active |= IFM_1000_T; break; case SPEED_10000: ifmr->ifm_active |= IFM_10G_SR; break; case SPEED_40000: ifmr->ifm_active |= IFM_40G_CR4; break; default: ifmr->ifm_active |= IFM_AUTO; break; } if (nic->duplex) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; NICVF_CORE_UNLOCK(nic); } static int -nicvf_media_change(struct ifnet *ifp __unused) +nicvf_media_change(if_t ifp __unused) { return (0); } /* Register read/write APIs */ void nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val) { bus_write_8(nic->reg_base, offset, val); } uint64_t nicvf_reg_read(struct nicvf *nic, uint64_t offset) { return (bus_read_8(nic->reg_base, offset)); } void nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t qidx, uint64_t val) { bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val); } uint64_t nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset, uint64_t qidx) { return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT))); } /* VF -> PF mailbox communication */ static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx) { uint64_t *msg = (uint64_t *)mbx; nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]); nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]); } int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) { int timeout = NIC_MBOX_MSG_TIMEOUT * 10; int sleep = 2; NICVF_CORE_LOCK_ASSERT(nic); nic->pf_acked = FALSE; nic->pf_nacked = FALSE; nicvf_write_to_mbx(nic, mbx); /* Wait for previous message to be acked, timeout 2sec */ while (!nic->pf_acked) { if (nic->pf_nacked) return (EINVAL); DELAY(sleep * 1000); if (nic->pf_acked) break; 
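		/*
		 * Each pass waits 'sleep' (2) ms in DELAY() and charges
		 * 'sleep' against 'timeout', so the loop's total budget works
		 * out to NIC_MBOX_MSG_TIMEOUT * 10 milliseconds before the
		 * EBUSY path below is taken.
		 */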
timeout -= sleep; if (!timeout) { device_printf(nic->dev, "PF didn't ack to mbox msg %d from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); return (EBUSY); } } return (0); } /* * Checks if VF is able to comminicate with PF * and also gets the VNIC number this VF is associated to. */ static int nicvf_check_pf_ready(struct nicvf *nic) { union nic_mbx mbx = {}; mbx.msg.msg = NIC_MBOX_MSG_READY; if (nicvf_send_msg_to_pf(nic, &mbx)) { device_printf(nic->dev, "PF didn't respond to READY msg\n"); return 0; } return 1; } static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) { if (bgx->rx) nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats; else nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats; } static void nicvf_handle_mbx_intr(struct nicvf *nic) { union nic_mbx mbx = {}; uint64_t *mbx_data; uint64_t mbx_addr; int i; mbx_addr = NIC_VF_PF_MAILBOX_0_1; mbx_data = (uint64_t *)&mbx; for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { *mbx_data = nicvf_reg_read(nic, mbx_addr); mbx_data++; mbx_addr += sizeof(uint64_t); } switch (mbx.msg.msg) { case NIC_MBOX_MSG_READY: nic->pf_acked = TRUE; nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; nic->node = mbx.nic_cfg.node_id; memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN); nic->loopback_supported = mbx.nic_cfg.loopback_supported; nic->link_up = FALSE; nic->duplex = 0; nic->speed = 0; break; case NIC_MBOX_MSG_ACK: nic->pf_acked = TRUE; break; case NIC_MBOX_MSG_NACK: nic->pf_nacked = TRUE; break; case NIC_MBOX_MSG_RSS_SIZE: nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size; nic->pf_acked = TRUE; break; case NIC_MBOX_MSG_BGX_STATS: nicvf_read_bgx_stats(nic, &mbx.bgx_stats); nic->pf_acked = TRUE; break; case NIC_MBOX_MSG_BGX_LINK_CHANGE: nic->pf_acked = TRUE; nic->link_up = mbx.link_status.link_up; nic->duplex = mbx.link_status.duplex; nic->speed = mbx.link_status.speed; if (nic->link_up) { if_setbaudrate(nic->ifp, nic->speed * 1000000); if_link_state_change(nic->ifp, LINK_STATE_UP); } else { if_setbaudrate(nic->ifp, 0); if_link_state_change(nic->ifp, LINK_STATE_DOWN); } break; default: device_printf(nic->dev, "Invalid message from PF, msg 0x%x\n", mbx.msg.msg); break; } nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0); } static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) { union nic_mbx mbx = {}; mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; mbx.frs.max_frs = mtu; mbx.frs.vf_id = nic->vf_id; return nicvf_send_msg_to_pf(nic, &mbx); } static int nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr) { union nic_mbx mbx = {}; mbx.mac.msg = NIC_MBOX_MSG_SET_MAC; mbx.mac.vf_id = nic->vf_id; memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN); return (nicvf_send_msg_to_pf(nic, &mbx)); } static void nicvf_config_cpi(struct nicvf *nic) { union nic_mbx mbx = {}; mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG; mbx.cpi_cfg.vf_id = nic->vf_id; mbx.cpi_cfg.cpi_alg = nic->cpi_alg; mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt; nicvf_send_msg_to_pf(nic, &mbx); } static void nicvf_get_rss_size(struct nicvf *nic) { union nic_mbx mbx = {}; mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; mbx.rss_size.vf_id = nic->vf_id; nicvf_send_msg_to_pf(nic, &mbx); } static void nicvf_config_rss(struct nicvf *nic) { union nic_mbx mbx = {}; struct nicvf_rss_info *rss; int ind_tbl_len; int i, nextq; rss = &nic->rss_info; ind_tbl_len = rss->rss_size; nextq = 0; mbx.rss_cfg.vf_id = nic->vf_id; mbx.rss_cfg.hash_bits = rss->hash_bits; while (ind_tbl_len != 0) { mbx.rss_cfg.tbl_offset = nextq; mbx.rss_cfg.tbl_len = MIN(ind_tbl_len, RSS_IND_TBL_LEN_PER_MBX_MSG); 
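		/*
		 * The indirection table is handed to the PF in chunks of at
		 * most RSS_IND_TBL_LEN_PER_MBX_MSG (8) entries per mailbox
		 * message; the first chunk goes out as RSS_CFG and every
		 * continuation chunk as RSS_CFG_CONT.
		 */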
mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ? NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG; for (i = 0; i < mbx.rss_cfg.tbl_len; i++) mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++]; nicvf_send_msg_to_pf(nic, &mbx); ind_tbl_len -= mbx.rss_cfg.tbl_len; } } static void nicvf_set_rss_key(struct nicvf *nic) { struct nicvf_rss_info *rss; uint64_t key_addr; int idx; rss = &nic->rss_info; key_addr = NIC_VNIC_RSS_KEY_0_4; for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) { nicvf_reg_write(nic, key_addr, rss->key[idx]); key_addr += sizeof(uint64_t); } } static int nicvf_rss_init(struct nicvf *nic) { struct nicvf_rss_info *rss; int idx; nicvf_get_rss_size(nic); rss = &nic->rss_info; if (nic->cpi_alg != CPI_ALG_NONE) { rss->enable = FALSE; rss->hash_bits = 0; return (ENXIO); } rss->enable = TRUE; /* Using the HW reset value for now */ rss->key[0] = 0xFEED0BADFEED0BADUL; rss->key[1] = 0xFEED0BADFEED0BADUL; rss->key[2] = 0xFEED0BADFEED0BADUL; rss->key[3] = 0xFEED0BADFEED0BADUL; rss->key[4] = 0xFEED0BADFEED0BADUL; nicvf_set_rss_key(nic); rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA; nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg); rss->hash_bits = fls(rss->rss_size) - 1; for (idx = 0; idx < rss->rss_size; idx++) rss->ind_tbl[idx] = idx % nic->rx_queues; nicvf_config_rss(nic); return (0); } static int nicvf_init_resources(struct nicvf *nic) { int err; union nic_mbx mbx = {}; mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; /* Enable Qset */ nicvf_qset_config(nic, TRUE); /* Initialize queues and HW for data transfer */ err = nicvf_config_data_transfer(nic, TRUE); if (err) { device_printf(nic->dev, "Failed to alloc/config VF's QSet resources\n"); return (err); } /* Send VF config done msg to PF */ nicvf_write_to_mbx(nic, &mbx); return (0); } static void nicvf_misc_intr_handler(void *arg) { struct nicvf *nic = (struct nicvf *)arg; uint64_t intr; intr = nicvf_reg_read(nic, NIC_VF_INT); /* Check for spurious interrupt */ if (!(intr & NICVF_INTR_MBOX_MASK)) return; nicvf_handle_mbx_intr(nic); } static int nicvf_intr_handler(void *arg) { struct nicvf *nic; struct cmp_queue *cq; int qidx; cq = (struct cmp_queue *)arg; nic = cq->nic; qidx = cq->idx; /* Disable interrupts */ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task); /* Clear interrupt */ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); return (FILTER_HANDLED); } static void nicvf_rbdr_intr_handler(void *arg) { struct nicvf *nic; struct queue_set *qs; struct rbdr *rbdr; int qidx; nic = (struct nicvf *)arg; /* Disable RBDR interrupt and schedule softirq */ for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) { if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx)) continue; nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); qs = nic->qs; rbdr = &qs->rbdr[qidx]; taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait); /* Clear interrupt */ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx); } } static void nicvf_qs_err_intr_handler(void *arg) { struct nicvf *nic = (struct nicvf *)arg; struct queue_set *qs = nic->qs; /* Disable Qset err interrupt and schedule softirq */ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task); nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0); } static int nicvf_enable_msix(struct nicvf *nic) { struct pci_devinfo *dinfo; int rid, count; int ret; dinfo = device_get_ivars(nic->dev); rid = dinfo->cfg.msix.msix_table_bar; nic->msix_table_res = bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (nic->msix_table_res == NULL) { 
device_printf(nic->dev, "Could not allocate memory for MSI-X table\n"); return (ENXIO); } count = nic->num_vec = NIC_VF_MSIX_VECTORS; ret = pci_alloc_msix(nic->dev, &count); if ((ret != 0) || (count != nic->num_vec)) { device_printf(nic->dev, "Request for #%d msix vectors failed, error: %d\n", nic->num_vec, ret); return (ret); } nic->msix_enabled = 1; return (0); } static void nicvf_disable_msix(struct nicvf *nic) { if (nic->msix_enabled) { pci_release_msi(nic->dev); nic->msix_enabled = 0; nic->num_vec = 0; } } static void nicvf_release_all_interrupts(struct nicvf *nic) { struct resource *res; int irq; int err __diagused; /* Free registered interrupts */ for (irq = 0; irq < nic->num_vec; irq++) { res = nic->msix_entries[irq].irq_res; if (res == NULL) continue; /* Teardown interrupt first */ if (nic->msix_entries[irq].handle != NULL) { err = bus_teardown_intr(nic->dev, nic->msix_entries[irq].irq_res, nic->msix_entries[irq].handle); KASSERT(err == 0, ("ERROR: Unable to teardown interrupt %d", irq)); nic->msix_entries[irq].handle = NULL; } bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res), nic->msix_entries[irq].irq_res); nic->msix_entries[irq].irq_res = NULL; } /* Disable MSI-X */ nicvf_disable_msix(nic); } /* * Initialize MSIX vectors and register MISC interrupt. * Send READY message to PF to check if its alive */ static int nicvf_allocate_misc_interrupt(struct nicvf *nic) { struct resource *res; int irq, rid; int ret = 0; /* Return if mailbox interrupt is already registered */ if (nic->msix_enabled) return (0); /* Enable MSI-X */ if (nicvf_enable_msix(nic) != 0) return (ENXIO); irq = NICVF_INTR_ID_MISC; rid = irq + 1; nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev, SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE)); if (nic->msix_entries[irq].irq_res == NULL) { device_printf(nic->dev, "Could not allocate Mbox interrupt for VF%d\n", device_get_unit(nic->dev)); return (ENXIO); } ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res, (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic, &nic->msix_entries[irq].handle); if (ret != 0) { res = nic->msix_entries[irq].irq_res; bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res), res); nic->msix_entries[irq].irq_res = NULL; return (ret); } return (0); } static int nicvf_enable_misc_interrupt(struct nicvf *nic) { /* Enable mailbox interrupt */ nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0); /* Check if VF is able to communicate with PF */ if (!nicvf_check_pf_ready(nic)) { nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); return (ENXIO); } return (0); } static void nicvf_release_net_interrupts(struct nicvf *nic) { struct resource *res; int irq; int err; for_each_cq_irq(irq) { res = nic->msix_entries[irq].irq_res; if (res == NULL) continue; /* Teardown active interrupts first */ if (nic->msix_entries[irq].handle != NULL) { err = bus_teardown_intr(nic->dev, nic->msix_entries[irq].irq_res, nic->msix_entries[irq].handle); KASSERT(err == 0, ("ERROR: Unable to teardown CQ interrupt %d", (irq - NICVF_INTR_ID_CQ))); if (err != 0) continue; } /* Release resource */ bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res), res); nic->msix_entries[irq].irq_res = NULL; } for_each_rbdr_irq(irq) { res = nic->msix_entries[irq].irq_res; if (res == NULL) continue; /* Teardown active interrupts first */ if (nic->msix_entries[irq].handle != NULL) { err = bus_teardown_intr(nic->dev, nic->msix_entries[irq].irq_res, nic->msix_entries[irq].handle); KASSERT(err == 0, ("ERROR: Unable to teardown RDBR interrupt %d", (irq - 
NICVF_INTR_ID_RBDR))); if (err != 0) continue; } /* Release resource */ bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res), res); nic->msix_entries[irq].irq_res = NULL; } irq = NICVF_INTR_ID_QS_ERR; res = nic->msix_entries[irq].irq_res; if (res != NULL) { /* Teardown active interrupts first */ if (nic->msix_entries[irq].handle != NULL) { err = bus_teardown_intr(nic->dev, nic->msix_entries[irq].irq_res, nic->msix_entries[irq].handle); KASSERT(err == 0, ("ERROR: Unable to teardown QS Error interrupt %d", irq)); if (err != 0) return; } /* Release resource */ bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res), res); nic->msix_entries[irq].irq_res = NULL; } } static int nicvf_allocate_net_interrupts(struct nicvf *nic) { u_int cpuid; int irq, rid; int qidx; int ret = 0; /* MSI-X must be configured by now */ if (!nic->msix_enabled) { device_printf(nic->dev, "Cannot alloacte queue interrups. " "MSI-X interrupts disabled.\n"); return (ENXIO); } /* Register CQ interrupts */ for_each_cq_irq(irq) { if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt)) break; qidx = irq - NICVF_INTR_ID_CQ; rid = irq + 1; nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev, SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE)); if (nic->msix_entries[irq].irq_res == NULL) { device_printf(nic->dev, "Could not allocate CQ interrupt %d for VF%d\n", (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev)); ret = ENXIO; goto error; } ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res, (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler, NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle); if (ret != 0) { device_printf(nic->dev, "Could not setup CQ interrupt %d for VF%d\n", (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev)); goto error; } cpuid = (device_get_unit(nic->dev) * CMP_QUEUE_CNT) + qidx; cpuid %= mp_ncpus; /* * Save CPU ID for later use when system-wide RSS is enabled. * It will be used to pit the CQ task to the same CPU that got * interrupted. 
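		 * In effect each VF spreads its completion queues round-robin
		 * across the available CPUs: CQ 'qidx' of unit 'u' lands on
		 * CPU (u * CMP_QUEUE_CNT + qidx) % mp_ncpus.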
*/ nic->qs->cq[qidx].cmp_cpuid = cpuid; if (bootverbose) { device_printf(nic->dev, "bind CQ%d IRQ to CPU%d\n", qidx, cpuid); } /* Bind interrupts to the given CPU */ bus_bind_intr(nic->dev, nic->msix_entries[irq].irq_res, cpuid); } /* Register RBDR interrupt */ for_each_rbdr_irq(irq) { if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt)) break; rid = irq + 1; nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev, SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE)); if (nic->msix_entries[irq].irq_res == NULL) { device_printf(nic->dev, "Could not allocate RBDR interrupt %d for VF%d\n", (irq - NICVF_INTR_ID_RBDR), device_get_unit(nic->dev)); ret = ENXIO; goto error; } ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res, (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_rbdr_intr_handler, nic, &nic->msix_entries[irq].handle); if (ret != 0) { device_printf(nic->dev, "Could not setup RBDR interrupt %d for VF%d\n", (irq - NICVF_INTR_ID_RBDR), device_get_unit(nic->dev)); goto error; } } /* Register QS error interrupt */ irq = NICVF_INTR_ID_QS_ERR; rid = irq + 1; nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev, SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE)); if (nic->msix_entries[irq].irq_res == NULL) { device_printf(nic->dev, "Could not allocate QS Error interrupt for VF%d\n", device_get_unit(nic->dev)); ret = ENXIO; goto error; } ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res, (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler, nic, &nic->msix_entries[irq].handle); if (ret != 0) { device_printf(nic->dev, "Could not setup QS Error interrupt for VF%d\n", device_get_unit(nic->dev)); goto error; } return (0); error: nicvf_release_net_interrupts(nic); return (ret); } static int nicvf_stop_locked(struct nicvf *nic) { - struct ifnet *ifp; + if_t ifp; int qidx; struct queue_set *qs = nic->qs; union nic_mbx mbx = {}; NICVF_CORE_LOCK_ASSERT(nic); /* Stop callout. 
Can block here since holding SX lock */ callout_drain(&nic->stats_callout); ifp = nic->ifp; mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; nicvf_send_msg_to_pf(nic, &mbx); /* Disable RBDR & QS error interrupts */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx); } nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0); /* Deactivate network interface */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); /* Free resources */ nicvf_config_data_transfer(nic, FALSE); /* Disable HW Qset */ nicvf_qset_config(nic, FALSE); /* disable mailbox interrupt */ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); return (0); } static void nicvf_update_stats(struct nicvf *nic) { int qidx; struct nicvf_hw_stats *stats = &nic->hw_stats; struct nicvf_drv_stats *drv_stats = &nic->drv_stats; struct queue_set *qs = nic->qs; #define GET_RX_STATS(reg) \ nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3)) #define GET_TX_STATS(reg) \ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3)) stats->rx_bytes = GET_RX_STATS(RX_OCTS); stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST); stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST); stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST); stats->rx_fcs_errors = GET_RX_STATS(RX_FCS); stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR); stats->rx_drop_red = GET_RX_STATS(RX_RED); stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS); stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN); stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS); stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST); stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST); stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); stats->tx_drops = GET_TX_STATS(TX_DROP); drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok; drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun; drv_stats->tx_drops = stats->tx_drops; /* Update RQ and SQ stats */ for (qidx = 0; qidx < qs->rq_cnt; qidx++) nicvf_update_rq_stats(nic, qidx); for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_update_sq_stats(nic, qidx); } static void nicvf_tick_stats(void *arg) { struct nicvf *nic; nic = (struct nicvf *)arg; /* Read the statistics */ nicvf_update_stats(nic); callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic); } diff --git a/sys/dev/vnic/nicvf_queues.c b/sys/dev/vnic/nicvf_queues.c index d2a6f8532b37..017d1c6e3e1c 100644 --- a/sys/dev/vnic/nicvf_queues.c +++ b/sys/dev/vnic/nicvf_queues.c @@ -1,2369 +1,2369 @@ /* * Copyright (C) 2015 Cavium Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_bgx.h" #include "nic_reg.h" #include "nic.h" #include "q_struct.h" #include "nicvf_queues.h" #define DEBUG #undef DEBUG #ifdef DEBUG #define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__) #else #define dprintf(dev, fmt, ...) #endif MALLOC_DECLARE(M_NICVF); static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *); static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *); static void nicvf_sq_disable(struct nicvf *, int); static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int); static void nicvf_put_sq_desc(struct snd_queue *, int); static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int, boolean_t); static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int); static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **); static void nicvf_rbdr_task(void *, int); static void nicvf_rbdr_task_nowait(void *, int); struct rbuf_info { bus_dma_tag_t dmat; bus_dmamap_t dmap; struct mbuf * mbuf; }; #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES)) /* Poll a register for a specific value */ static int nicvf_poll_reg(struct nicvf *nic, int qidx, uint64_t reg, int bit_pos, int bits, int val) { uint64_t bit_mask; uint64_t reg_val; int timeout = 10; bit_mask = (1UL << bits) - 1; bit_mask = (bit_mask << bit_pos); while (timeout) { reg_val = nicvf_queue_reg_read(nic, reg, qidx); if (((reg_val & bit_mask) >> bit_pos) == val) return (0); DELAY(1000); timeout--; } device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg); return (ETIMEDOUT); } /* Callback for bus_dmamap_load() */ static void nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr; KASSERT(nseg == 1, ("wrong number of segments, should be 1")); paddr = arg; *paddr = segs->ds_addr; } /* Allocate memory for a queue's descriptors */ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, int q_len, int desc_size, int align_bytes) { int err, err_dmat __diagused; /* Create DMA tag first */ err = bus_dma_tag_create( bus_get_dma_tag(nic->dev), /* parent tag */ align_bytes, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ 
(q_len * desc_size), /* maxsize */ 1, /* nsegments */ (q_len * desc_size), /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &dmem->dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for descriptors ring\n"); return (err); } /* Allocate segment of continuous DMA safe memory */ err = bus_dmamem_alloc( dmem->dmat, /* DMA tag */ &dmem->base, /* virtual address */ (BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */ &dmem->dmap); /* DMA map */ if (err != 0) { device_printf(nic->dev, "Failed to allocate DMA safe memory for" "descriptors ring\n"); goto dmamem_fail; } err = bus_dmamap_load( dmem->dmat, dmem->dmap, dmem->base, (q_len * desc_size), /* allocation size */ nicvf_dmamap_q_cb, /* map to DMA address cb. */ &dmem->phys_base, /* physical address */ BUS_DMA_NOWAIT); if (err != 0) { device_printf(nic->dev, "Cannot load DMA map of descriptors ring\n"); goto dmamap_fail; } dmem->q_len = q_len; dmem->size = (desc_size * q_len); return (0); dmamap_fail: bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); dmem->phys_base = 0; dmamem_fail: err_dmat = bus_dma_tag_destroy(dmem->dmat); dmem->base = NULL; KASSERT(err_dmat == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); return (err); } /* Free queue's descriptor memory */ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) { int err __diagused; if ((dmem == NULL) || (dmem->base == NULL)) return; /* Unload a map */ bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(dmem->dmat, dmem->dmap); /* Free DMA memory */ bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); /* Destroy DMA tag */ err = bus_dma_tag_destroy(dmem->dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); dmem->phys_base = 0; dmem->base = NULL; } /* * Allocate buffer for packet reception * HW returns memory address where packet is DMA'ed but not a pointer * into RBDR ring, so save buffer address at the start of fragment and * align the start address to a cache aligned address */ static __inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf) { struct mbuf *mbuf; struct rbuf_info *rinfo; bus_dma_segment_t segs[1]; int nsegs; int err; mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES); if (mbuf == NULL) return (ENOMEM); /* * The length is equal to the actual length + one 128b line * used as a room for rbuf_info structure. */ mbuf->m_len = mbuf->m_pkthdr.len = buf_len; err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (err != 0) { device_printf(nic->dev, "Failed to map mbuf into DMA visible memory, err: %d\n", err); m_freem(mbuf); bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap); return (err); } if (nsegs != 1) panic("Unexpected number of DMA segments for RB: %d", nsegs); /* * Now use the room for rbuf_info structure * and adjust mbuf data and length. 
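	 * The resulting buffer layout is:
	 *
	 *	[ rbuf_info, one NICVF_RCV_BUF_ALIGN_BYTES line ][ packet data ... ]
	 *
	 * The address handed to the HW (*rbuf) points at the packet data;
	 * nicvf_rb_ptr_to_mbuf() later recovers rinfo by stepping back
	 * NICVF_RCV_BUF_ALIGN_BYTES from it (see GET_RBUF_INFO()).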
*/ rinfo = (struct rbuf_info *)mbuf->m_data; m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES); rinfo->dmat = rbdr->rbdr_buff_dmat; rinfo->dmap = dmap; rinfo->mbuf = mbuf; *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES; return (0); } /* Retrieve mbuf for received packet */ static struct mbuf * nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr) { struct mbuf *mbuf; struct rbuf_info *rinfo; /* Get buffer start address and alignment offset */ rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr)); /* Now retrieve mbuf to give to stack */ mbuf = rinfo->mbuf; if (__predict_false(mbuf == NULL)) { panic("%s: Received packet fragment with NULL mbuf", device_get_nameunit(nic->dev)); } /* * Clear the mbuf in the descriptor to indicate * that this slot is processed and free to use. */ rinfo->mbuf = NULL; bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rinfo->dmat, rinfo->dmap); return (mbuf); } /* Allocate RBDR ring and populate receive buffers */ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len, int buf_size, int qidx) { bus_dmamap_t dmap; bus_addr_t rbuf; struct rbdr_entry_t *desc; int idx; int err; /* Allocate rbdr descriptors ring */ err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Failed to create RBDR descriptors ring\n"); return (err); } rbdr->desc = rbdr->dmem.base; /* * Buffer size has to be in multiples of 128 bytes. * Make room for metadata of size of one line (128 bytes). */ rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES; rbdr->enable = TRUE; rbdr->thresh = RBDR_THRESH; rbdr->nic = nic; rbdr->idx = qidx; /* * Create DMA tag for Rx buffers. * Each map created using this tag is intended to store Rx payload for * one fragment and one header structure containing rbuf_info (thus * additional 128 byte line since RB must be a multiple of 128 byte * cache line). 
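 * Note that the lowaddr/highaddr window passed to bus_dma_tag_create()
 * below (the DMAP_*_PHYSADDR pair) presumably exists so that every
 * receive buffer stays within the kernel direct map: the completion
 * path only gets a raw physical buffer address back from the CQE and
 * recovers the rbuf_info via PHYS_TO_DMAP() (see GET_RBUF_INFO()).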
*/ if (buf_size > MCLBYTES) { device_printf(nic->dev, "Buffer size to large for mbuf cluster\n"); return (EINVAL); } err = bus_dma_tag_create( bus_get_dma_tag(nic->dev), /* parent tag */ NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */ 0, /* boundary */ DMAP_MAX_PHYSADDR, /* lowaddr */ DMAP_MIN_PHYSADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ roundup2(buf_size, MCLBYTES), /* maxsize */ 1, /* nsegments */ roundup2(buf_size, MCLBYTES), /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &rbdr->rbdr_buff_dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for RBDR buffers\n"); return (err); } rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) * ring_len, M_NICVF, (M_WAITOK | M_ZERO)); for (idx = 0; idx < ring_len; idx++) { err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap); if (err != 0) { device_printf(nic->dev, "Failed to create DMA map for RB\n"); return (err); } rbdr->rbdr_buff_dmaps[idx] = dmap; err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK, DMA_BUFFER_LEN, &rbuf); if (err != 0) return (err); desc = GET_RBDR_DESC(rbdr, idx); desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); } /* Allocate taskqueue */ TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr); TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr); rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK, taskqueue_thread_enqueue, &rbdr->rbdr_taskq); taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq", device_get_nameunit(nic->dev)); return (0); } /* Free RBDR ring and its receive buffers */ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) { struct mbuf *mbuf; struct queue_set *qs; struct rbdr_entry_t *desc; struct rbuf_info *rinfo; bus_addr_t buf_addr; int head, tail, idx; int err __diagused; qs = nic->qs; if ((qs == NULL) || (rbdr == NULL)) return; rbdr->enable = FALSE; if (rbdr->rbdr_taskq != NULL) { /* Remove tasks */ while (taskqueue_cancel(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait, NULL) != 0) { /* Finish the nowait task first */ taskqueue_drain(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait); } taskqueue_free(rbdr->rbdr_taskq); rbdr->rbdr_taskq = NULL; while (taskqueue_cancel(taskqueue_thread, &rbdr->rbdr_task, NULL) != 0) { /* Now finish the sleepable task */ taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task); } } /* * Free all of the memory under the RB descriptors. * There are assumptions here: * 1. Corresponding RBDR is disabled * - it is safe to operate using head and tail indexes * 2. All bffers that were received are properly freed by * the receive handler * - there is no need to unload DMA map and free MBUF for other * descriptors than unused ones */ if (rbdr->rbdr_buff_dmat != NULL) { head = rbdr->head; tail = rbdr->tail; while (head != tail) { desc = GET_RBDR_DESC(rbdr, head); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr)); bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); mbuf = rinfo->mbuf; /* This will destroy everything including rinfo! */ m_freem(mbuf); head++; head &= (rbdr->dmem.q_len - 1); } /* Free tail descriptor */ desc = GET_RBDR_DESC(rbdr, tail); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr)); bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); mbuf = rinfo->mbuf; /* This will destroy everything including rinfo! 
*/ m_freem(mbuf); /* Destroy DMA maps */ for (idx = 0; idx < qs->rbdr_len; idx++) { if (rbdr->rbdr_buff_dmaps[idx] == NULL) continue; err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat, rbdr->rbdr_buff_dmaps[idx]); KASSERT(err == 0, ("%s: Could not destroy DMA map for RB, desc: %d", __func__, idx)); rbdr->rbdr_buff_dmaps[idx] = NULL; } /* Now destroy the tag */ err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); rbdr->head = 0; rbdr->tail = 0; } /* Free RBDR ring */ nicvf_free_q_desc_mem(nic, &rbdr->dmem); } /* * Refill receive buffer descriptors with new buffers. */ static int nicvf_refill_rbdr(struct rbdr *rbdr, int mflags) { struct nicvf *nic; struct queue_set *qs; int rbdr_idx; int tail, qcount; int refill_rb_cnt; struct rbdr_entry_t *desc; bus_dmamap_t dmap; bus_addr_t rbuf; boolean_t rb_alloc_fail; int new_rb; rb_alloc_fail = TRUE; new_rb = 0; nic = rbdr->nic; qs = nic->qs; rbdr_idx = rbdr->idx; /* Check if it's enabled */ if (!rbdr->enable) return (0); /* Get no of desc's to be refilled */ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); qcount &= 0x7FFFF; /* Doorbell can be ringed with a max of ring size minus 1 */ if (qcount >= (qs->rbdr_len - 1)) { rb_alloc_fail = FALSE; goto out; } else refill_rb_cnt = qs->rbdr_len - qcount - 1; /* Start filling descs from tail */ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; while (refill_rb_cnt) { tail++; tail &= (rbdr->dmem.q_len - 1); dmap = rbdr->rbdr_buff_dmaps[tail]; if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags, DMA_BUFFER_LEN, &rbuf)) { /* Something went wrong. Resign */ break; } desc = GET_RBDR_DESC(rbdr, tail); desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); refill_rb_cnt--; new_rb++; } /* make sure all memory stores are done before ringing doorbell */ wmb(); /* Check if buffer allocation failed */ if (refill_rb_cnt == 0) rb_alloc_fail = FALSE; /* Notify HW */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, rbdr_idx, new_rb); out: if (!rb_alloc_fail) { /* * Re-enable RBDR interrupts only * if buffer allocation is success. */ nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); return (0); } return (ENOMEM); } /* Refill RBs even if sleep is needed to reclaim memory */ static void nicvf_rbdr_task(void *arg, int pending) { struct rbdr *rbdr; int err; rbdr = (struct rbdr *)arg; err = nicvf_refill_rbdr(rbdr, M_WAITOK); if (__predict_false(err != 0)) { panic("%s: Failed to refill RBs even when sleep enabled", __func__); } } /* Refill RBs as soon as possible without waiting */ static void nicvf_rbdr_task_nowait(void *arg, int pending) { struct rbdr *rbdr; int err; rbdr = (struct rbdr *)arg; err = nicvf_refill_rbdr(rbdr, M_NOWAIT); if (err != 0) { /* * Schedule another, sleepable kernel thread * that will for sure refill the buffers. 
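 * (The path above attempted M_NOWAIT allocations from the rbdr
 * taskqueue without sleeping; this fallback runs nicvf_rbdr_task() on
 * the generic taskqueue_thread, where M_WAITOK allocations may sleep,
 * and that task panics only if even a sleeping allocation cannot
 * refill the ring.)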
*/ taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task); } } static int nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq, struct cqe_rx_t *cqe_rx, int cqe_type) { struct mbuf *mbuf; struct rcv_queue *rq; int rq_idx; int err = 0; rq_idx = cqe_rx->rq_idx; rq = &nic->qs->rq[rq_idx]; /* Check for errors */ err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); if (err && !cqe_rx->rb_cnt) return (0); mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx); if (mbuf == NULL) { dprintf(nic->dev, "Packet not received\n"); return (0); } /* If error packet */ if (err != 0) { m_freem(mbuf); return (0); } if (rq->lro_enabled && ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) && (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { /* * At this point it is known that there are no errors in the * packet. Attempt to LRO enqueue. Send to stack if no resources * or enqueue error. */ if ((rq->lro.lro_cnt != 0) && (tcp_lro_rx(&rq->lro, mbuf, 0) == 0)) return (0); } /* * Push this packet to the stack later to avoid * unlocking completion task in the middle of work. */ err = buf_ring_enqueue(cq->rx_br, mbuf); if (err != 0) { /* * Failed to enqueue this mbuf. * We don't drop it, just schedule another task. */ return (err); } return (0); } static void nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq, struct cqe_send_t *cqe_tx, int cqe_type) { bus_dmamap_t dmap; struct mbuf *mbuf; struct snd_queue *sq; struct sq_hdr_subdesc *hdr; mbuf = NULL; sq = &nic->qs->sq[cqe_tx->sq_idx]; hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) return; dprintf(nic->dev, "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, cqe_tx->sqe_ptr, hdr->subdesc_cnt); dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap; bus_dmamap_unload(sq->snd_buff_dmat, dmap); mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf; if (mbuf != NULL) { m_freem(mbuf); sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL; nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); } static int nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx) { struct mbuf *mbuf; - struct ifnet *ifp; + if_t ifp; int processed_cqe, tx_done = 0; #ifdef DEBUG int work_done = 0; #endif int cqe_count, cqe_head; struct queue_set *qs = nic->qs; struct cmp_queue *cq = &qs->cq[cq_idx]; struct snd_queue *sq = &qs->sq[cq_idx]; struct rcv_queue *rq; struct cqe_rx_t *cq_desc; struct lro_ctrl *lro; int rq_idx; int cmp_err; NICVF_CMP_LOCK(cq); cmp_err = 0; processed_cqe = 0; /* Get no of valid CQ entries to process */ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx); cqe_count &= CQ_CQE_COUNT; if (cqe_count == 0) goto out; /* Get head of the valid CQ entries */ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; cqe_head &= 0xFFFF; dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n", __func__, cq_idx, cqe_count, cqe_head); while (processed_cqe < cqe_count) { /* Get the CQ descriptor */ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); cqe_head++; cqe_head &= (cq->dmem.q_len - 1); /* Prefetch next CQ descriptor */ __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx, cq_desc->cqe_type); switch (cq_desc->cqe_type) { case CQE_TYPE_RX: cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc, CQE_TYPE_RX); if (__predict_false(cmp_err != 0)) { /* * Ups. Cannot finish now. 
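 * (A non-zero return from nicvf_rcv_pkt_handler() means the mbuf could
 * not be enqueued on cq->rx_br; the remaining CQEs are left in place
 * for the rescheduled completion task, see nicvf_cmp_task().)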
* Let's try again later. */ goto done; } #ifdef DEBUG work_done++; #endif break; case CQE_TYPE_SEND: nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc, CQE_TYPE_SEND); tx_done++; break; case CQE_TYPE_INVALID: case CQE_TYPE_RX_SPLIT: case CQE_TYPE_RX_TCP: case CQE_TYPE_SEND_PTP: /* Ignore for now */ break; } processed_cqe++; } done: dprintf(nic->dev, "%s CQ%d processed_cqe %d work_done %d\n", __func__, cq_idx, processed_cqe, work_done); /* Ring doorbell to inform H/W to reuse processed CQEs */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe); if ((tx_done > 0) && ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) { /* Reenable TXQ if its stopped earlier due to SQ full */ if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); } out: /* * Flush any outstanding LRO work */ rq_idx = cq_idx; rq = &nic->qs->rq[rq_idx]; lro = &rq->lro; tcp_lro_flush_all(lro); NICVF_CMP_UNLOCK(cq); ifp = nic->ifp; /* Push received MBUFs to the stack */ while (!buf_ring_empty(cq->rx_br)) { mbuf = buf_ring_dequeue_mc(cq->rx_br); if (__predict_true(mbuf != NULL)) - (*ifp->if_input)(ifp, mbuf); + if_input(ifp, mbuf); } return (cmp_err); } /* * Qset error interrupt handler * * As of now only CQ errors are handled */ static void nicvf_qs_err_task(void *arg, int pending) { struct nicvf *nic; struct queue_set *qs; int qidx; uint64_t status; boolean_t enable = TRUE; nic = (struct nicvf *)arg; qs = nic->qs; /* Deactivate network interface */ if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); /* Check if it is CQ err */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) { status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx); if ((status & CQ_ERR_MASK) == 0) continue; /* Process already queued CQEs and reconfig CQ */ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); nicvf_sq_disable(nic, qidx); (void)nicvf_cq_intr_handler(nic, qidx); nicvf_cmp_queue_config(nic, qs, qidx, enable); nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx); nicvf_sq_enable(nic, &qs->sq[qidx], qidx); nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); } if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); /* Re-enable Qset error interrupt */ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); } static void nicvf_cmp_task(void *arg, int pending) { struct cmp_queue *cq; struct nicvf *nic; int cmp_err; cq = (struct cmp_queue *)arg; nic = cq->nic; /* Handle CQ descriptors */ cmp_err = nicvf_cq_intr_handler(nic, cq->idx); if (__predict_false(cmp_err != 0)) { /* * Schedule another thread here since we did not * process the entire CQ due to Tx or Rx CQ parse error. */ taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task); } nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); /* Reenable interrupt (previously disabled in nicvf_intr_handler() */ nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx); } /* Initialize completion queue */ static int nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len, int qidx) { int err; /* Initizalize lock */ snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock", device_get_nameunit(nic->dev), qidx); mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF); err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, NICVF_CQ_BASE_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Could not allocate DMA memory for CQ\n"); return (err); } cq->desc = cq->dmem.base; cq->thresh = pass1_silicon(nic->dev) ? 
0 : CMP_QUEUE_CQE_THRESH; cq->nic = nic; cq->idx = qidx; nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK, &cq->mtx); /* Allocate taskqueue */ NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq); cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK, taskqueue_thread_enqueue, &cq->cmp_taskq); taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)", device_get_nameunit(nic->dev), qidx); return (0); } static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) { if (cq == NULL) return; /* * The completion queue itself should be disabled by now * (ref. nicvf_snd_queue_config()). * Ensure that it is safe to disable it or panic. */ if (cq->enable) panic("%s: Trying to free working CQ(%d)", __func__, cq->idx); if (cq->cmp_taskq != NULL) { /* Remove task */ while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0) taskqueue_drain(cq->cmp_taskq, &cq->cmp_task); taskqueue_free(cq->cmp_taskq); cq->cmp_taskq = NULL; } /* * Completion interrupt will possibly enable interrupts again * so disable interrupting now after we finished processing * completion task. It is safe to do so since the corresponding CQ * was already disabled. */ nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx); nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); NICVF_CMP_LOCK(cq); nicvf_free_q_desc_mem(nic, &cq->dmem); drbr_free(cq->rx_br, M_DEVBUF); NICVF_CMP_UNLOCK(cq); mtx_destroy(&cq->mtx); memset(cq->mtx_name, 0, sizeof(cq->mtx_name)); } int nicvf_xmit_locked(struct snd_queue *sq) { struct nicvf *nic; - struct ifnet *ifp; + if_t ifp; struct mbuf *next; int err; NICVF_TX_LOCK_ASSERT(sq); nic = sq->nic; ifp = nic->ifp; err = 0; while ((next = drbr_peek(ifp, sq->br)) != NULL) { /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); err = nicvf_tx_mbuf_locked(sq, &next); if (err != 0) { if (next == NULL) drbr_advance(ifp, sq->br); else drbr_putback(ifp, sq->br, next); break; } drbr_advance(ifp, sq->br); } return (err); } static void nicvf_snd_task(void *arg, int pending) { struct snd_queue *sq = (struct snd_queue *)arg; struct nicvf *nic; - struct ifnet *ifp; + if_t ifp; int err; nic = sq->nic; ifp = nic->ifp; /* * Skip sending anything if the driver is not running, * SQ full or link is down. 
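 * (IFF_DRV_OACTIVE appears to serve as the "SQ full / not ready"
 * marker here: the Qset error task sets it while reconfiguring the
 * queues and nicvf_cq_intr_handler() clears it again once transmit
 * completions free up SQ descriptors.)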
*/ if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) || !nic->link_up) return; NICVF_TX_LOCK(sq); err = nicvf_xmit_locked(sq); NICVF_TX_UNLOCK(sq); /* Try again */ if (err != 0) taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); } /* Initialize transmit queue */ static int nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len, int qidx) { size_t i; int err; /* Initizalize TX lock for this queue */ snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock", device_get_nameunit(nic->dev), qidx); mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF); NICVF_TX_LOCK(sq); /* Allocate buffer ring */ sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF, M_NOWAIT, &sq->mtx); if (sq->br == NULL) { device_printf(nic->dev, "ERROR: Could not set up buf ring for SQ(%d)\n", qidx); err = ENOMEM; goto error; } /* Allocate DMA memory for Tx descriptors */ err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, NICVF_SQ_BASE_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Could not allocate DMA memory for SQ\n"); goto error; } sq->desc = sq->dmem.base; sq->head = sq->tail = 0; atomic_store_rel_int(&sq->free_cnt, q_len - 1); sq->thresh = SND_QUEUE_THRESH; sq->idx = qidx; sq->nic = nic; /* * Allocate DMA maps for Tx buffers */ /* Create DMA tag first */ err = bus_dma_tag_create( bus_get_dma_tag(nic->dev), /* parent tag */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ NICVF_TSO_MAXSIZE, /* maxsize */ NICVF_TSO_NSEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sq->snd_buff_dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for Tx buffers\n"); goto error; } /* Allocate send buffers array */ sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF, (M_NOWAIT | M_ZERO)); if (sq->snd_buff == NULL) { device_printf(nic->dev, "Could not allocate memory for Tx buffers array\n"); err = ENOMEM; goto error; } /* Now populate maps */ for (i = 0; i < q_len; i++) { err = bus_dmamap_create(sq->snd_buff_dmat, 0, &sq->snd_buff[i].dmap); if (err != 0) { device_printf(nic->dev, "Failed to create DMA maps for Tx buffers\n"); goto error; } } NICVF_TX_UNLOCK(sq); /* Allocate taskqueue */ TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq); sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK, taskqueue_thread_enqueue, &sq->snd_taskq); taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)", device_get_nameunit(nic->dev), qidx); return (0); error: NICVF_TX_UNLOCK(sq); return (err); } static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) { struct queue_set *qs = nic->qs; size_t i; int err __diagused; if (sq == NULL) return; if (sq->snd_taskq != NULL) { /* Remove task */ while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0) taskqueue_drain(sq->snd_taskq, &sq->snd_task); taskqueue_free(sq->snd_taskq); sq->snd_taskq = NULL; } NICVF_TX_LOCK(sq); if (sq->snd_buff_dmat != NULL) { if (sq->snd_buff != NULL) { for (i = 0; i < qs->sq_len; i++) { m_freem(sq->snd_buff[i].mbuf); sq->snd_buff[i].mbuf = NULL; bus_dmamap_unload(sq->snd_buff_dmat, sq->snd_buff[i].dmap); err = bus_dmamap_destroy(sq->snd_buff_dmat, sq->snd_buff[i].dmap); /* * If bus_dmamap_destroy fails it can cause * random panic later if the tag is also * destroyed in the process. 
*/ KASSERT(err == 0, ("%s: Could not destroy DMA map for SQ", __func__)); } } free(sq->snd_buff, M_NICVF); err = bus_dma_tag_destroy(sq->snd_buff_dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); } /* Free private driver ring for this send queue */ if (sq->br != NULL) drbr_free(sq->br, M_DEVBUF); if (sq->dmem.base != NULL) nicvf_free_q_desc_mem(nic, &sq->dmem); NICVF_TX_UNLOCK(sq); /* Destroy Tx lock */ mtx_destroy(&sq->mtx); memset(sq->mtx_name, 0, sizeof(sq->mtx_name)); } static void nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { /* Disable send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); /* Check if SQ is stopped */ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) return; /* Reset send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); } static void nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { union nic_mbx mbx = {}; /* Make sure all packets in the pipeline are written back into mem */ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; nicvf_send_msg_to_pf(nic, &mbx); } static void nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { /* Disable timer threshold (doesn't get reset upon CQ reset) */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); /* Disable completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); /* Reset completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); } static void nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx) { uint64_t tmp, fifo_state; int timeout = 10; /* Save head and tail pointers for freeing up buffers */ rbdr->head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3; rbdr->tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3; /* * If RBDR FIFO is in 'FAIL' state then do a reset first * before reclaiming.
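 * ('FAIL' is encoded in bits <63:62> of NIC_QSET_RBDR_0_1_STATUS0,
 * which is why the code below checks ((fifo_state >> 62) & 0x03) == 0x3
 * and afterwards polls those same two bits for 0x00 and 0x02.)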
*/ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); if (((fifo_state >> 62) & 0x03) == 0x3) { nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, NICVF_RBDR_RESET); } /* Disable RBDR */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) return; while (1) { tmp = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx); if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) break; DELAY(1000); timeout--; if (!timeout) { device_printf(nic->dev, "Failed polling on prefetch status\n"); return; } } nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, NICVF_RBDR_RESET); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) return; nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) return; } /* Configures receive queue */ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) { union nic_mbx mbx = {}; struct rcv_queue *rq; struct rq_cfg rq_cfg; - struct ifnet *ifp; + if_t ifp; struct lro_ctrl *lro; ifp = nic->ifp; rq = &qs->rq[qidx]; rq->enable = enable; lro = &rq->lro; /* Disable receive queue */ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); if (!rq->enable) { nicvf_reclaim_rcv_queue(nic, qs, qidx); /* Free LRO memory */ tcp_lro_free(lro); rq->lro_enabled = FALSE; return; } /* Configure LRO if enabled */ rq->lro_enabled = FALSE; if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) { if (tcp_lro_init(lro) != 0) { device_printf(nic->dev, "Failed to initialize LRO for RXQ%d\n", qidx); } else { rq->lro_enabled = TRUE; lro->ifp = nic->ifp; } } rq->cq_qs = qs->vnic_id; rq->cq_idx = qidx; rq->start_rbdr_qs = qs->vnic_id; rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; rq->cont_rbdr_qs = qs->vnic_id; rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; /* all writes of RBDR data to be loaded into L2 Cache as well*/ rq->caching = 1; /* Send a mailbox msg to PF to config RQ */ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; mbx.rq.qs_num = qs->vnic_id; mbx.rq.rq_num = qidx; mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); nicvf_send_msg_to_pf(nic, &mbx); mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0); nicvf_send_msg_to_pf(nic, &mbx); /* * RQ drop config * Enable CQ drop to reserve sufficient CQEs for all tx packets */ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8); nicvf_send_msg_to_pf(nic, &mbx); nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); /* Enable Receive queue */ rq_cfg.ena = 1; rq_cfg.tcp_ena = 0; nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(uint64_t *)&rq_cfg); } /* Configures completion queue */ static void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { struct cmp_queue *cq; struct cq_cfg cq_cfg; cq = &qs->cq[qidx]; cq->enable = enable; if (!cq->enable) { nicvf_reclaim_cmp_queue(nic, qs, qidx); return; } /* Reset completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); /* Set completion queue base address */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, (uint64_t)(cq->dmem.phys_base)); /* Enable Completion queue */ cq_cfg.ena = 1; cq_cfg.reset = 0; cq_cfg.caching = 0; cq_cfg.qsize = CMP_QSIZE; cq_cfg.avg_con = 0; 
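/*
 * cq_cfg (defined in q_struct.h) mirrors the bit layout of the CQ
 * configuration CSR, so the whole structure is written out below as a
 * single raw 64-bit value; sq_cfg and rbdr_cfg are handled the same way
 * further down.
 */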
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, nic->cq_coalesce_usecs); } /* Configures transmit queue */ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { union nic_mbx mbx = {}; struct snd_queue *sq; struct sq_cfg sq_cfg; sq = &qs->sq[qidx]; sq->enable = enable; if (!sq->enable) { nicvf_reclaim_snd_queue(nic, qs, qidx); return; } /* Reset send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); sq->cq_qs = qs->vnic_id; sq->cq_idx = qidx; /* Send a mailbox msg to PF to config SQ */ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; mbx.sq.qs_num = qs->vnic_id; mbx.sq.sq_num = qidx; mbx.sq.sqs_mode = nic->sqs_mode; mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; nicvf_send_msg_to_pf(nic, &mbx); /* Set queue base address */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, (uint64_t)(sq->dmem.phys_base)); /* Enable send queue & set queue size */ sq_cfg.ena = 1; sq_cfg.reset = 0; sq_cfg.ldwb = 0; sq_cfg.qsize = SND_QSIZE; sq_cfg.tstmp_bgx_intf = 0; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); } /* Configures receive buffer descriptor ring */ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { struct rbdr *rbdr; struct rbdr_cfg rbdr_cfg; rbdr = &qs->rbdr[qidx]; nicvf_reclaim_rbdr(nic, rbdr, qidx); if (!enable) return; /* Set descriptor base address */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, (uint64_t)(rbdr->dmem.phys_base)); /* Enable RBDR & set queue size */ /* Buffer size should be in multiples of 128 bytes */ rbdr_cfg.ena = 1; rbdr_cfg.reset = 0; rbdr_cfg.ldwb = 0; rbdr_cfg.qsize = RBDR_SIZE; rbdr_cfg.avg_con = 0; rbdr_cfg.lines = rbdr->dma_size / 128; nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, *(uint64_t *)&rbdr_cfg); /* Notify HW */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx, qs->rbdr_len - 1); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx, rbdr->thresh - 1); } /* Requests PF to assign and enable Qset */ void nicvf_qset_config(struct nicvf *nic, boolean_t enable) { union nic_mbx mbx = {}; struct queue_set *qs; struct qs_cfg *qs_cfg; qs = nic->qs; if (qs == NULL) { device_printf(nic->dev, "Qset is still not allocated, don't init queues\n"); return; } qs->enable = enable; qs->vnic_id = nic->vf_id; /* Send a mailbox msg to PF to config Qset */ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; mbx.qs.num = qs->vnic_id; mbx.qs.cfg = 0; qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; if (qs->enable) { qs_cfg->ena = 1; qs_cfg->vnic = qs->vnic_id; } nicvf_send_msg_to_pf(nic, &mbx); } static void nicvf_free_resources(struct nicvf *nic) { int qidx; struct queue_set *qs; qs = nic->qs; /* * Remove QS error task first since it has to be dead * to safely free completion queue tasks. 
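 * (nicvf_qs_err_task() calls nicvf_cq_intr_handler() and re-enables CQ
 * interrupts, so it has to be drained and freed before the per-CQ
 * taskqueues and CQ memory are torn down in nicvf_free_cmp_queue().)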
*/ if (qs->qs_err_taskq != NULL) { /* Shut down QS error tasks */ while (taskqueue_cancel(qs->qs_err_taskq, &qs->qs_err_task, NULL) != 0) { taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task); } taskqueue_free(qs->qs_err_taskq); qs->qs_err_taskq = NULL; } /* Free receive buffer descriptor ring */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_free_rbdr(nic, &qs->rbdr[qidx]); /* Free completion queue */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_free_cmp_queue(nic, &qs->cq[qidx]); /* Free send queue */ for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_free_snd_queue(nic, &qs->sq[qidx]); } static int nicvf_alloc_resources(struct nicvf *nic) { struct queue_set *qs = nic->qs; int qidx; /* Alloc receive buffer descriptor ring */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, DMA_BUFFER_LEN, qidx)) goto alloc_fail; } /* Alloc send queue */ for (qidx = 0; qidx < qs->sq_cnt; qidx++) { if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx)) goto alloc_fail; } /* Alloc completion queue */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) { if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx)) goto alloc_fail; } /* Allocate QS error taskqueue */ NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic); qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK, taskqueue_thread_enqueue, &qs->qs_err_taskq); taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq", device_get_nameunit(nic->dev)); return (0); alloc_fail: nicvf_free_resources(nic); return (ENOMEM); } int nicvf_set_qset_resources(struct nicvf *nic) { struct queue_set *qs; qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK)); nic->qs = qs; /* Set count of each queue */ qs->rbdr_cnt = RBDR_CNT; qs->rq_cnt = RCV_QUEUE_CNT; qs->sq_cnt = SND_QUEUE_CNT; qs->cq_cnt = CMP_QUEUE_CNT; /* Set queue lengths */ qs->rbdr_len = RCV_BUF_COUNT; qs->sq_len = SND_QUEUE_LEN; qs->cq_len = CMP_QUEUE_LEN; nic->rx_queues = qs->rq_cnt; nic->tx_queues = qs->sq_cnt; return (0); } int nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable) { boolean_t disable = FALSE; struct queue_set *qs; int qidx; qs = nic->qs; if (qs == NULL) return (0); if (enable) { if (nicvf_alloc_resources(nic) != 0) return (ENOMEM); for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_snd_queue_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_cmp_queue_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_rbdr_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->rq_cnt; qidx++) nicvf_rcv_queue_config(nic, qs, qidx, enable); } else { for (qidx = 0; qidx < qs->rq_cnt; qidx++) nicvf_rcv_queue_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_rbdr_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_snd_queue_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_cmp_queue_config(nic, qs, qidx, disable); nicvf_free_resources(nic); } return (0); } /* * Get a free desc from SQ * returns descriptor ponter & descriptor number */ static __inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) { int qentry; qentry = sq->tail; atomic_subtract_int(&sq->free_cnt, desc_cnt); sq->tail += desc_cnt; sq->tail &= (sq->dmem.q_len - 1); return (qentry); } /* Free descriptor back to SQ for future use */ static void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) { atomic_add_int(&sq->free_cnt, desc_cnt); sq->head += desc_cnt; sq->head &= (sq->dmem.q_len 
- 1); } static __inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) { qentry++; qentry &= (sq->dmem.q_len - 1); return (qentry); } static void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) { uint64_t sq_cfg; sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); sq_cfg |= NICVF_SQ_EN; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); /* Ring doorbell so that H/W restarts processing SQEs */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); } static void nicvf_sq_disable(struct nicvf *nic, int qidx) { uint64_t sq_cfg; sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); sq_cfg &= ~NICVF_SQ_EN; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); } static void nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx) { uint64_t head; struct snd_buff *snd_buff; struct sq_hdr_subdesc *hdr; NICVF_TX_LOCK(sq); head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; while (sq->head != head) { hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { nicvf_put_sq_desc(sq, 1); continue; } snd_buff = &sq->snd_buff[sq->head]; if (snd_buff->mbuf != NULL) { bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); m_freem(snd_buff->mbuf); sq->snd_buff[sq->head].mbuf = NULL; } nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } NICVF_TX_UNLOCK(sq); } /* * Add SQ HEADER subdescriptor. * First subdescriptor for every send descriptor. */ static __inline int nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, int subdesc_cnt, struct mbuf *mbuf, int len) { struct nicvf *nic; struct sq_hdr_subdesc *hdr; struct ether_vlan_header *eh; #ifdef INET struct ip *ip; struct tcphdr *th; #endif uint16_t etype; int ehdrlen, iphlen, poff, proto; nic = sq->nic; hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); sq->snd_buff[qentry].mbuf = mbuf; memset(hdr, 0, SND_QUEUE_DESC_SIZE); hdr->subdesc_type = SQ_DESC_TYPE_HEADER; /* Enable notification via CQE after processing SQE */ hdr->post_cqe = 1; /* No of subdescriptors following this */ hdr->subdesc_cnt = subdesc_cnt; hdr->tot_len = len; eh = mtod(mbuf, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } poff = proto = -1; switch (etype) { #ifdef INET6 case ETHERTYPE_IPV6: if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) { mbuf = m_pullup(mbuf, ehdrlen +sizeof(struct ip6_hdr)); sq->snd_buff[qentry].mbuf = NULL; if (mbuf == NULL) return (ENOBUFS); } poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto); if (poff < 0) return (ENOBUFS); poff += ehdrlen; break; #endif #ifdef INET case ETHERTYPE_IP: if (mbuf->m_len < ehdrlen + sizeof(struct ip)) { mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } if (mbuf->m_pkthdr.csum_flags & CSUM_IP) hdr->csum_l3 = 1; /* Enable IP csum calculation */ ip = (struct ip *)(mbuf->m_data + ehdrlen); iphlen = ip->ip_hl << 2; poff = ehdrlen + iphlen; proto = ip->ip_p; break; #endif } #if defined(INET6) || defined(INET) if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) { switch (proto) { case IPPROTO_TCP: if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct tcphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } 
hdr->csum_l4 = SEND_L4_CSUM_TCP; break; case IPPROTO_UDP: if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct udphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } hdr->csum_l4 = SEND_L4_CSUM_UDP; break; case IPPROTO_SCTP: if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct sctphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } hdr->csum_l4 = SEND_L4_CSUM_SCTP; break; default: break; } hdr->l3_offset = ehdrlen; hdr->l4_offset = poff; } if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) { th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff)); hdr->tso = 1; hdr->tso_start = poff + (th->th_off * 4); hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz; hdr->inner_l3_offset = ehdrlen - 2; nic->drv_stats.tx_tso++; } #endif return (0); } /* * SQ GATHER subdescriptor * Must follow HDR descriptor */ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, int size, uint64_t data) { struct sq_gather_subdesc *gather; qentry &= (sq->dmem.q_len - 1); gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); memset(gather, 0, SND_QUEUE_DESC_SIZE); gather->subdesc_type = SQ_DESC_TYPE_GATHER; gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; gather->size = size; gather->addr = data; } /* Put an mbuf to a SQ for packet transfer. */ static int nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp) { bus_dma_segment_t segs[256]; struct snd_buff *snd_buff; size_t seg; int nsegs, qentry; int subdesc_cnt; int err; NICVF_TX_LOCK_ASSERT(sq); if (sq->free_cnt == 0) return (ENOBUFS); snd_buff = &sq->snd_buff[sq->tail]; err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap, *mbufp, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(err != 0)) { /* ARM64TODO: Add mbuf defragmenting if we lack maps */ m_freem(*mbufp); *mbufp = NULL; return (err); } /* Set how many subdescriptors is required */ subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1; if (subdesc_cnt > sq->free_cnt) { /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */ bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); return (ENOBUFS); } qentry = nicvf_get_sq_desc(sq, subdesc_cnt); /* Add SQ header subdesc */ err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp, (*mbufp)->m_pkthdr.len); if (err != 0) { nicvf_put_sq_desc(sq, subdesc_cnt); bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); if (err == ENOBUFS) { m_freem(*mbufp); *mbufp = NULL; } return (err); } /* Add SQ gather subdescs */ for (seg = 0; seg < nsegs; seg++) { qentry = nicvf_get_nxt_sqentry(sq, qentry); nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len, segs[seg].ds_addr); } /* make sure all memory stores are done before ringing doorbell */ bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE); dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n", __func__, sq->idx, subdesc_cnt); /* Inform HW to xmit new packet */ nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR, sq->idx, subdesc_cnt); return (0); } static __inline u_int frag_num(u_int i) { #if BYTE_ORDER == BIG_ENDIAN return ((i & ~3) + 3 - (i & 3)); #else return (i); #endif } /* Returns MBUF for a received packet */ struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx) { int frag; int payload_len = 0; struct mbuf *mbuf; struct mbuf *mbuf_frag; uint16_t *rb_lens = NULL; 
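/*
 * rb_lens and rb_ptrs point into the CQE itself: the per-fragment
 * lengths start 3 64-bit words into the descriptor and the buffer
 * pointers 6 words in (see the assignments below); frag_num() only
 * adjusts the index into the 16-bit length array on big-endian hosts.
 */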
uint64_t *rb_ptrs = NULL; mbuf = NULL; rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t))); rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t))); dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n", __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { payload_len = rb_lens[frag_num(frag)]; if (frag == 0) { /* First fragment */ mbuf = nicvf_rb_ptr_to_mbuf(nic, (*rb_ptrs - cqe_rx->align_pad)); mbuf->m_len = payload_len; mbuf->m_data += cqe_rx->align_pad; if_setrcvif(mbuf, nic->ifp); } else { /* Add fragments */ mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs); m_append(mbuf, payload_len, mbuf_frag->m_data); m_freem(mbuf_frag); } /* Next buffer pointer */ rb_ptrs++; } if (__predict_true(mbuf != NULL)) { m_fixhdr(mbuf); mbuf->m_pkthdr.flowid = cqe_rx->rq_idx; M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) { /* * HW by default verifies IP & TCP/UDP/SCTP checksums */ if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) { mbuf->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); } switch (cqe_rx->l4_type) { case L4TYPE_UDP: case L4TYPE_TCP: /* fall through */ mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mbuf->m_pkthdr.csum_data = 0xffff; break; case L4TYPE_SCTP: mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: break; } } } return (mbuf); } /* Enable interrupt */ void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val; reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); switch (int_type) { case NICVF_INTR_CQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to enable interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); } /* Disable interrupt */ void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val = 0; switch (int_type) { case NICVF_INTR_CQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to disable interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); } /* Clear interrupt */ void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val = 0; switch (int_type) { case NICVF_INTR_CQ: reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val = (1UL << 
NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val = (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to clear interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_INT, reg_val); } /* Check if interrupt is enabled */ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val; uint64_t mask = 0xff; reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); switch (int_type) { case NICVF_INTR_CQ: mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: mask = NICVF_INTR_PKT_DROP_MASK; break; case NICVF_INTR_TCP_TIMER: mask = NICVF_INTR_TCP_TIMER_MASK; break; case NICVF_INTR_MBOX: mask = NICVF_INTR_MBOX_MASK; break; case NICVF_INTR_QS_ERR: mask = NICVF_INTR_QS_ERR_MASK; break; default: device_printf(nic->dev, "Failed to check interrupt enable: unknown type\n"); break; } return (reg_val & mask); } void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) { struct rcv_queue *rq; #define GET_RQ_STATS(reg) \ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) rq = &nic->qs->rq[rq_idx]; rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); } void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) { struct snd_queue *sq; #define GET_SQ_STATS(reg) \ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) sq = &nic->qs->sq[sq_idx]; sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); } /* Check for errors in the receive cmp.queue entry */ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) { struct nicvf_hw_stats *stats = &nic->hw_stats; struct nicvf_drv_stats *drv_stats = &nic->drv_stats; if (!cqe_rx->err_level && !cqe_rx->err_opcode) { drv_stats->rx_frames_ok++; return (0); } switch (cqe_rx->err_opcode) { case CQ_RX_ERROP_RE_PARTIAL: stats->rx_bgx_truncated_pkts++; break; case CQ_RX_ERROP_RE_JABBER: stats->rx_jabber_errs++; break; case CQ_RX_ERROP_RE_FCS: stats->rx_fcs_errs++; break; case CQ_RX_ERROP_RE_RX_CTL: stats->rx_bgx_errs++; break; case CQ_RX_ERROP_PREL2_ERR: stats->rx_prel2_errs++; break; case CQ_RX_ERROP_L2_MAL: stats->rx_l2_hdr_malformed++; break; case CQ_RX_ERROP_L2_OVERSIZE: stats->rx_oversize++; break; case CQ_RX_ERROP_L2_UNDERSIZE: stats->rx_undersize++; break; case CQ_RX_ERROP_L2_LENMISM: stats->rx_l2_len_mismatch++; break; case CQ_RX_ERROP_L2_PCLP: stats->rx_l2_pclp++; break; case CQ_RX_ERROP_IP_NOT: stats->rx_ip_ver_errs++; break; case CQ_RX_ERROP_IP_CSUM_ERR: stats->rx_ip_csum_errs++; break; case CQ_RX_ERROP_IP_MAL: stats->rx_ip_hdr_malformed++; break; case CQ_RX_ERROP_IP_MALD: stats->rx_ip_payload_malformed++; break; case CQ_RX_ERROP_IP_HOP: stats->rx_ip_ttl_errs++; break; case CQ_RX_ERROP_L3_PCLP: stats->rx_l3_pclp++; break; case CQ_RX_ERROP_L4_MAL: stats->rx_l4_malformed++; break; case CQ_RX_ERROP_L4_CHK: stats->rx_l4_csum_errs++; break; case CQ_RX_ERROP_UDP_LEN: stats->rx_udp_len_errs++; break; case CQ_RX_ERROP_L4_PORT: stats->rx_l4_port_errs++; break; case CQ_RX_ERROP_TCP_FLAG: stats->rx_tcp_flag_errs++; break; case CQ_RX_ERROP_TCP_OFFSET: stats->rx_tcp_offset_errs++; 
break; case CQ_RX_ERROP_L4_PCLP: stats->rx_l4_pclp++; break; case CQ_RX_ERROP_RBDR_TRUNC: stats->rx_truncated_pkts++; break; } return (1); } /* Check for errors in the send cmp.queue entry */ int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_send_t *cqe_tx) { struct cmp_queue_stats *stats = &cq->stats; switch (cqe_tx->send_status) { case CQ_TX_ERROP_GOOD: stats->tx.good++; return (0); case CQ_TX_ERROP_DESC_FAULT: stats->tx.desc_fault++; break; case CQ_TX_ERROP_HDR_CONS_ERR: stats->tx.hdr_cons_err++; break; case CQ_TX_ERROP_SUBDC_ERR: stats->tx.subdesc_err++; break; case CQ_TX_ERROP_IMM_SIZE_OFLOW: stats->tx.imm_size_oflow++; break; case CQ_TX_ERROP_DATA_SEQUENCE_ERR: stats->tx.data_seq_err++; break; case CQ_TX_ERROP_MEM_SEQUENCE_ERR: stats->tx.mem_seq_err++; break; case CQ_TX_ERROP_LOCK_VIOL: stats->tx.lock_viol++; break; case CQ_TX_ERROP_DATA_FAULT: stats->tx.data_fault++; break; case CQ_TX_ERROP_TSTMP_CONFLICT: stats->tx.tstmp_conflict++; break; case CQ_TX_ERROP_TSTMP_TIMEOUT: stats->tx.tstmp_timeout++; break; case CQ_TX_ERROP_MEM_FAULT: stats->tx.mem_fault++; break; case CQ_TX_ERROP_CK_OVERLAP: stats->tx.csum_overlap++; break; case CQ_TX_ERROP_CK_OFLOW: stats->tx.csum_overflow++; break; } return (1); } diff --git a/sys/dev/vnic/thunder_mdio.c b/sys/dev/vnic/thunder_mdio.c index d07b17b56a2e..3e9a8f74e673 100644 --- a/sys/dev/vnic/thunder_mdio.c +++ b/sys/dev/vnic/thunder_mdio.c @@ -1,512 +1,512 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_mdio_var.h" #include "lmac_if.h" #include "miibus_if.h" #define REG_BASE_RID 0 #define SMI_CMD 0x00 #define SMI_CMD_PHY_REG_ADR_SHIFT (0) #define SMI_CMD_PHY_REG_ADR_MASK (0x1FUL << SMI_CMD_PHY_REG_ADR_SHIFT) #define SMI_CMD_PHY_ADR_SHIFT (8) #define SMI_CMD_PHY_ADR_MASK (0x1FUL << SMI_CMD_PHY_ADR_SHIFT) #define SMI_CMD_PHY_OP_MASK (0x3UL << 16) #define SMI_CMD_PHY_OP_C22_READ (0x1UL << 16) #define SMI_CMD_PHY_OP_C22_WRITE (0x0UL << 16) #define SMI_CMD_PHY_OP_C45_READ (0x3UL << 16) #define SMI_CMD_PHY_OP_C45_WRITE (0x1UL << 16) #define SMI_CMD_PHY_OP_C45_ADDR (0x0UL << 16) #define SMI_WR_DAT 0x08 #define SMI_WR_DAT_PENDING (1UL << 17) #define SMI_WR_DAT_VAL (1UL << 16) #define SMI_WR_DAT_DAT_MASK (0xFFFFUL << 0) #define SMI_RD_DAT 0x10 #define SMI_RD_DAT_PENDING (1UL << 17) #define SMI_RD_DAT_VAL (1UL << 16) #define SMI_RD_DAT_DAT_MASK (0xFFFFUL << 0) #define SMI_CLK 0x18 #define SMI_CLK_PREAMBLE (1UL << 12) #define SMI_CLK_MODE (1UL << 24) #define SMI_EN 0x20 #define SMI_EN_EN (1UL << 0) /* Enabele interface */ #define SMI_DRV_CTL 0x28 static int thunder_mdio_detach(device_t); static int thunder_mdio_read(device_t, int, int); static int thunder_mdio_write(device_t, int, int, int); -static int thunder_ifmedia_change_stub(struct ifnet *); -static void thunder_ifmedia_status_stub(struct ifnet *, struct ifmediareq *); +static int thunder_ifmedia_change_stub(if_t); +static void thunder_ifmedia_status_stub(if_t, struct ifmediareq *); static int thunder_mdio_media_status(device_t, int, int *, int *, int *); static int thunder_mdio_media_change(device_t, int, int, int, int); static int thunder_mdio_phy_connect(device_t, int, int); static int thunder_mdio_phy_disconnect(device_t, int, int); static device_method_t thunder_mdio_methods[] = { /* Device interface */ DEVMETHOD(device_detach, thunder_mdio_detach), /* LMAC interface */ DEVMETHOD(lmac_media_status, thunder_mdio_media_status), DEVMETHOD(lmac_media_change, thunder_mdio_media_change), DEVMETHOD(lmac_phy_connect, thunder_mdio_phy_connect), DEVMETHOD(lmac_phy_disconnect, thunder_mdio_phy_disconnect), /* MII interface */ DEVMETHOD(miibus_readreg, thunder_mdio_read), DEVMETHOD(miibus_writereg, thunder_mdio_write), /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(thunder_mdio, thunder_mdio_driver, thunder_mdio_methods, sizeof(struct thunder_mdio_softc)); DRIVER_MODULE(miibus, thunder_mdio, miibus_driver, 0, 0); MODULE_VERSION(thunder_mdio, 1); MODULE_DEPEND(thunder_mdio, ether, 1, 1, 1); MODULE_DEPEND(thunder_mdio, miibus, 1, 1, 1); MODULE_DEPEND(thunder_mdio, mrmlbus, 1, 1, 1); MALLOC_DEFINE(M_THUNDER_MDIO, "ThunderX MDIO", "Cavium ThunderX MDIO dynamic memory"); #define MDIO_LOCK_INIT(sc, name) \ mtx_init(&(sc)->mtx, name, NULL, MTX_DEF) #define MDIO_LOCK_DESTROY(sc) \ mtx_destroy(&(sc)->mtx) #define MDIO_LOCK(sc) mtx_lock(&(sc)->mtx) #define MDIO_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define MDIO_LOCK_ASSERT(sc) \ mtx_assert(&(sc)->mtx, MA_OWNED) #define mdio_reg_read(sc, reg) \ bus_read_8((sc)->reg_base, (reg)) #define mdio_reg_write(sc, reg, val) \ bus_write_8((sc)->reg_base, (reg), (val)) int thunder_mdio_attach(device_t dev) { struct thunder_mdio_softc *sc; int rid; sc = device_get_softc(dev); sc->dev = dev; /* Allocate memory resources */ rid = REG_BASE_RID; sc->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if 
(sc->reg_base == NULL) { device_printf(dev, "Could not allocate memory\n"); return (ENXIO); } TAILQ_INIT(&sc->phy_desc_head); MDIO_LOCK_INIT(sc, "ThunderX MDIO lock"); /* Enable SMI/MDIO interface */ mdio_reg_write(sc, SMI_EN, SMI_EN_EN); return (0); } static int thunder_mdio_detach(device_t dev) { struct thunder_mdio_softc *sc; sc = device_get_softc(dev); if (sc->reg_base != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, REG_BASE_RID, sc->reg_base); } return (0); } static __inline void thunder_mdio_set_mode(struct thunder_mdio_softc *sc, enum thunder_mdio_mode mode) { uint64_t smi_clk; if (sc->mode == mode) return; /* Set mode, IEEE CLAUSE 22 or IEEE CLAUSE 45 */ smi_clk = mdio_reg_read(sc, SMI_CLK); if (mode == MODE_IEEE_C22) smi_clk &= ~SMI_CLK_MODE; else smi_clk |= SMI_CLK_MODE; /* Enable sending 32 bit preamble on SMI transactions */ smi_clk |= SMI_CLK_PREAMBLE; /* Save settings */ mdio_reg_write(sc, SMI_CLK, smi_clk); sc->mode = mode; } static int thunder_mdio_c45_addr(struct thunder_mdio_softc *sc, int phy, int reg) { uint64_t smi_cmd, smi_wr_dat; ssize_t timeout; thunder_mdio_set_mode(sc, MODE_IEEE_C45); /* Prepare data for transmission */ mdio_reg_write(sc, SMI_WR_DAT, reg & SMI_WR_DAT_DAT_MASK); /* * Assemble command */ smi_cmd = 0; /* Set opcode */ smi_cmd |= SMI_CMD_PHY_OP_C45_WRITE; /* Set PHY address */ smi_cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK); /* Set PHY register offset */ smi_cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) & SMI_CMD_PHY_REG_ADR_MASK); mdio_reg_write(sc, SMI_CMD, smi_cmd); for (timeout = 1000; timeout > 0; timeout--) { smi_wr_dat = mdio_reg_read(sc, SMI_WR_DAT); if (smi_wr_dat & SMI_WR_DAT_PENDING) DELAY(1000); else break; } if (timeout <= 0) return (EIO); else { /* Return 0 on success */ return (0); } } static int thunder_mdio_read(device_t dev, int phy, int reg) { struct thunder_mdio_softc *sc; uint64_t smi_cmd, smi_rd_dat; ssize_t timeout; int err; sc = device_get_softc(dev); /* XXX Always C22 - for <= 1Gbps only */ thunder_mdio_set_mode(sc, MODE_IEEE_C22); /* * Assemble command */ smi_cmd = 0; /* Set opcode */ if (sc->mode == MODE_IEEE_C22) smi_cmd |= SMI_CMD_PHY_OP_C22_READ; else { smi_cmd |= SMI_CMD_PHY_OP_C45_READ; err = thunder_mdio_c45_addr(sc, phy, reg); if (err != 0) return (err); reg = (reg >> 16) & 0x1F; } /* Set PHY address */ smi_cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK); /* Set PHY register offset */ smi_cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) & SMI_CMD_PHY_REG_ADR_MASK); mdio_reg_write(sc, SMI_CMD, smi_cmd); for (timeout = 1000; timeout > 0; timeout--) { smi_rd_dat = mdio_reg_read(sc, SMI_RD_DAT); if (smi_rd_dat & SMI_RD_DAT_PENDING) DELAY(1000); else break; } if (smi_rd_dat & SMI_RD_DAT_VAL) return (smi_rd_dat & SMI_RD_DAT_DAT_MASK); else { /* Return 0 on error */ return (0); } } static int thunder_mdio_write(device_t dev, int phy, int reg, int data) { struct thunder_mdio_softc *sc; uint64_t smi_cmd, smi_wr_dat; ssize_t timeout; sc = device_get_softc(dev); /* XXX Always C22 - for <= 1Gbps only */ thunder_mdio_set_mode(sc, MODE_IEEE_C22); /* Prepare data for transmission */ mdio_reg_write(sc, SMI_WR_DAT, data & SMI_WR_DAT_DAT_MASK); /* * Assemble command */ smi_cmd = 0; /* Set opcode */ if (sc->mode == MODE_IEEE_C22) smi_cmd |= SMI_CMD_PHY_OP_C22_WRITE; else smi_cmd |= SMI_CMD_PHY_OP_C45_WRITE; /* Set PHY address */ smi_cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK); /* Set PHY register offset */ smi_cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) & SMI_CMD_PHY_REG_ADR_MASK);
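/*
 * At this point smi_cmd encodes the whole transaction per the SMI_CMD
 * layout above: register address in bits <4:0>, PHY address in bits
 * <12:8> and the opcode in bits <17:16>. Writing it to SMI_CMD kicks
 * off the bus cycle, which the loop below then polls for completion.
 */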
	mdio_reg_write(sc, SMI_CMD, smi_cmd);
	for (timeout = 1000; timeout > 0; timeout--) {
		smi_wr_dat = mdio_reg_read(sc, SMI_WR_DAT);
		if (smi_wr_dat & SMI_WR_DAT_PENDING)
			DELAY(1000);
		else
			break;
	}

	if (timeout <= 0)
		return (EIO);
	else {
		/* Return 0 on success */
		return (0);
	}
}

static int
-thunder_ifmedia_change_stub(struct ifnet *ifp __unused)
+thunder_ifmedia_change_stub(if_t ifp __unused)
{
	/* Will never be called by if_media */
	return (0);
}

static void
-thunder_ifmedia_status_stub(struct ifnet *ifp __unused, struct ifmediareq
+thunder_ifmedia_status_stub(if_t ifp __unused, struct ifmediareq
    *ifmr __unused)
{
	/* Will never be called by if_media */
}

static __inline struct phy_desc *
get_phy_desc(struct thunder_mdio_softc *sc, int lmacid)
{
	struct phy_desc *pd = NULL;

	MDIO_LOCK_ASSERT(sc);
	TAILQ_FOREACH(pd, &sc->phy_desc_head, phy_desc_list) {
		if (pd->lmacid == lmacid)
			break;
	}

	return (pd);
}

static int
thunder_mdio_media_status(device_t dev, int lmacid, int *link, int *duplex,
    int *speed)
{
	struct thunder_mdio_softc *sc;
	struct mii_data *mii_sc;
	struct phy_desc *pd;

	sc = device_get_softc(dev);

	MDIO_LOCK(sc);
	pd = get_phy_desc(sc, lmacid);
	if (pd == NULL) {
		/* Panic when invariants are enabled, fail otherwise. */
		KASSERT(0, ("%s: no PHY descriptor for LMAC%d",
		    __func__, lmacid));
		MDIO_UNLOCK(sc);
		return (ENXIO);
	}
	mii_sc = device_get_softc(pd->miibus);

	mii_tick(mii_sc);
	if ((mii_sc->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		/* Link is up */
		*link = 1;
	} else
		*link = 0;

	switch (IFM_SUBTYPE(mii_sc->mii_media_active)) {
	case IFM_10_T:
		*speed = 10;
		break;
	case IFM_100_TX:
		*speed = 100;
		break;
	case IFM_1000_T:
		*speed = 1000;
		break;
	default:
		/* IFM_NONE */
		*speed = 0;
	}

	if ((IFM_OPTIONS(mii_sc->mii_media_active) & IFM_FDX) != 0)
		*duplex = 1;
	else
		*duplex = 0;

	MDIO_UNLOCK(sc);

	return (0);
}

static int
thunder_mdio_media_change(device_t dev, int lmacid, int link, int duplex,
    int speed)
{

	return (EIO);
}

static int
thunder_mdio_phy_connect(device_t dev, int lmacid, int phy)
{
	struct thunder_mdio_softc *sc;
	struct phy_desc *pd;
	int err;

	sc = device_get_softc(dev);

	MDIO_LOCK(sc);
	pd = get_phy_desc(sc, lmacid);
	MDIO_UNLOCK(sc);
	if (pd == NULL) {
		pd = malloc(sizeof(*pd), M_THUNDER_MDIO, (M_NOWAIT | M_ZERO));
		if (pd == NULL)
			return (ENOMEM);
		pd->ifp = if_alloc(IFT_ETHER);
		if (pd->ifp == NULL) {
			free(pd, M_THUNDER_MDIO);
			return (ENOMEM);
		}
		pd->lmacid = lmacid;
	}

	err = mii_attach(dev, &pd->miibus, pd->ifp,
	    thunder_ifmedia_change_stub, thunder_ifmedia_status_stub,
	    BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);

	if (err != 0) {
		device_printf(dev, "Could not attach PHY%d\n", phy);
		if_free(pd->ifp);
		free(pd, M_THUNDER_MDIO);
		return (ENXIO);
	}

	MDIO_LOCK(sc);
	TAILQ_INSERT_TAIL(&sc->phy_desc_head, pd, phy_desc_list);
	MDIO_UNLOCK(sc);

	return (0);
}

static int
thunder_mdio_phy_disconnect(device_t dev, int lmacid, int phy)
{
	struct thunder_mdio_softc *sc;
	struct phy_desc *pd;

	sc = device_get_softc(dev);
	MDIO_LOCK(sc);

	pd = get_phy_desc(sc, lmacid);
	if (pd == NULL) {
		MDIO_UNLOCK(sc);
		return (EINVAL);
	}

	/* Remove this PHY descriptor from the list */
	TAILQ_REMOVE(&sc->phy_desc_head, pd, phy_desc_list);

	/* Detach miibus */
	bus_generic_detach(dev);
	device_delete_child(dev, pd->miibus);
	/* Free fake ifnet */
	if_free(pd->ifp);
	/* Free memory under phy descriptor */
	free(pd, M_THUNDER_MDIO);

	MDIO_UNLOCK(sc);
	return (0);
}
diff --git a/sys/dev/vnic/thunder_mdio_fdt.c b/sys/dev/vnic/thunder_mdio_fdt.c
index fd3905453008..d6d46b04a17f 100644
--- a/sys/dev/vnic/thunder_mdio_fdt.c
+++ b/sys/dev/vnic/thunder_mdio_fdt.c
@@ -1,284 +1,287 @@
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
+#include
#include
#include
#include
+#include
+
#include
#include

#include "thunder_mdio_var.h"

static int thunder_mdio_fdt_probe(device_t);
static int thunder_mdio_fdt_attach(device_t);

static device_method_t thunder_mdio_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, thunder_mdio_fdt_probe),
	DEVMETHOD(device_attach, thunder_mdio_fdt_attach),

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_1(thunder_mdio, thunder_mdio_fdt_driver, thunder_mdio_fdt_methods,
    sizeof(struct thunder_mdio_softc), thunder_mdio_driver);

EARLY_DRIVER_MODULE(thunder_mdio, ofwbus, thunder_mdio_fdt_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(thunder_mdio, mdionexus, thunder_mdio_fdt_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);

static struct ofw_compat_data mdio_compat_data[] = {
	{"cavium,octeon-3860-mdio", true},
	{"cavium,thunder-8890-mdio", true},
	{NULL, false}
};

static int
thunder_mdio_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, mdio_compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, THUNDER_MDIO_DEVSTR);
	return (BUS_PROBE_DEFAULT);
}

static int
thunder_mdio_fdt_attach(device_t dev)
{
	phandle_t node;
	int ret;

	/* Call core attach */
	ret = thunder_mdio_attach(dev);
	if (ret != 0)
		return (ret);
	/*
	 * Register the device to this node/xref.
	 * This allows the device_t to be retrieved later while holding
	 * only the node reference acquired from the FDT.
	 */
	node = ofw_bus_get_node(dev);
	OF_device_register_xref(OF_xref_from_node(node), dev);

	return (0);
}

struct mdionexus_softc {
	struct simplebus_softc simplebus_sc;
};

static device_probe_t mdionexus_fdt_probe;
static device_attach_t mdionexus_fdt_attach;

static const struct ofw_bus_devinfo * mdionexus_ofw_get_devinfo(device_t,
    device_t);

static device_method_t mdionexus_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, mdionexus_fdt_probe),
	DEVMETHOD(device_attach, mdionexus_fdt_attach),

	/* Bus interface */
	DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
	DEVMETHOD(bus_release_resource, bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_devinfo, mdionexus_ofw_get_devinfo),
	DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),

	DEVMETHOD_END
};

DEFINE_CLASS_0(mdionexus, mdionexus_fdt_driver, mdionexus_fdt_methods,
    sizeof(struct mdionexus_softc));

EARLY_DRIVER_MODULE(mdionexus, mrmlbus, mdionexus_fdt_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);

static int mdionexus_ofw_fill_ranges(phandle_t, struct simplebus_softc *);
static int mdionexus_ofw_bus_attach(device_t);

static int
mdionexus_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "cavium,thunder-8890-mdio-nexus"))
		return (ENXIO);

	device_set_desc(dev, "Cavium ThunderX MDIO nexus");
	return (BUS_PROBE_SPECIFIC);
}

static int
mdionexus_fdt_attach(device_t dev)
{
	int err;

	err = mdionexus_ofw_bus_attach(dev);
	if (err != 0)
		return (err);

	return (bus_generic_attach(dev));
}

/* OFW bus interface */
struct mdionexus_ofw_devinfo {
	struct ofw_bus_devinfo	di_dinfo;
	struct resource_list	di_rl;
};

static const struct ofw_bus_devinfo *
mdionexus_ofw_get_devinfo(device_t bus __unused, device_t child)
{
	struct mdionexus_ofw_devinfo *di;

	di = device_get_ivars(child);
	return (&di->di_dinfo);
}

/* Helper functions */
static int
mdionexus_ofw_fill_ranges(phandle_t node, struct simplebus_softc *sc)
{
	int host_address_cells;
	cell_t *base_ranges;
	ssize_t nbase_ranges;
	int err;
	int i, j, k;

	err = OF_searchencprop(OF_parent(node), "#address-cells",
	    &host_address_cells, sizeof(host_address_cells));
	if (err <= 0)
		return (-1);

	nbase_ranges = OF_getproplen(node, "ranges");
	if (nbase_ranges < 0)
		return (-1);
	sc->nranges = nbase_ranges / sizeof(cell_t) /
	    (sc->acells + host_address_cells + sc->scells);
	if (sc->nranges == 0)
		return (0);

	sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]),
	    M_THUNDER_MDIO, M_WAITOK);
	base_ranges = malloc(nbase_ranges, M_THUNDER_MDIO, M_WAITOK);
	OF_getencprop(node, "ranges", base_ranges, nbase_ranges);

	for (i = 0, j = 0; i < sc->nranges; i++) {
		sc->ranges[i].bus = 0;
		for (k = 0; k < sc->acells; k++) {
			sc->ranges[i].bus <<= 32;
			sc->ranges[i].bus |= base_ranges[j++];
		}
		sc->ranges[i].host = 0;
		for (k = 0; k < host_address_cells; k++) {
			sc->ranges[i].host <<= 32;
			sc->ranges[i].host |= base_ranges[j++];
		}
		sc->ranges[i].size = 0;
		for (k = 0; k < sc->scells; k++) {
			sc->ranges[i].size <<= 32;
			sc->ranges[i].size |= base_ranges[j++];
		}
	}

	free(base_ranges, M_THUNDER_MDIO);
	return (sc->nranges);
}

static int
mdionexus_ofw_bus_attach(device_t dev)
{
	struct simplebus_softc *sc;
	struct mdionexus_ofw_devinfo *di;
	device_t child;
	phandle_t parent, node;

	parent = ofw_bus_get_node(dev);
	simplebus_init(dev, parent);

	sc = (struct simplebus_softc *)device_get_softc(dev);

	if (mdionexus_ofw_fill_ranges(parent, sc) < 0) {
		device_printf(dev, "could not get ranges\n");
		return (ENXIO);
	}
	/* Iterate through all bus subordinates */
	for (node = OF_child(parent); node > 0; node = OF_peer(node)) {
		/* Allocate and populate devinfo. */
		di = malloc(sizeof(*di), M_THUNDER_MDIO, M_WAITOK | M_ZERO);
		if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) {
			free(di, M_THUNDER_MDIO);
			continue;
		}

		/* Initialize and populate resource list. */
		resource_list_init(&di->di_rl);
		ofw_bus_reg_to_rl(dev, node, sc->acells, sc->scells,
		    &di->di_rl);
		ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);

		/* Add newbus device for this FDT node */
		child = device_add_child(dev, NULL, -1);
		if (child == NULL) {
			resource_list_free(&di->di_rl);
			ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
			free(di, M_THUNDER_MDIO);
			continue;
		}

		device_set_ivars(child, di);
	}

	return (0);
}
diff --git a/sys/dev/vnic/thunder_mdio_var.h b/sys/dev/vnic/thunder_mdio_var.h
index acebec0dbcc0..c66c5e9683df 100644
--- a/sys/dev/vnic/thunder_mdio_var.h
+++ b/sys/dev/vnic/thunder_mdio_var.h
@@ -1,63 +1,63 @@
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __THUNDER_MDIO_VAR_H__
#define __THUNDER_MDIO_VAR_H__

#define THUNDER_MDIO_DEVSTR "Cavium ThunderX SMI/MDIO driver"

MALLOC_DECLARE(M_THUNDER_MDIO);

DECLARE_CLASS(thunder_mdio_driver);

enum thunder_mdio_mode {
	MODE_NONE = 0,
	MODE_IEEE_C22,
	MODE_IEEE_C45
};

struct phy_desc {
	device_t	miibus;		/* One miibus per LMAC */
-	struct ifnet *	ifp;		/* Fake ifp to satisfy miibus */
+	if_t		ifp;		/* Fake ifp to satisfy miibus */
	int		lmacid;		/* ID number of LMAC connected */
	TAILQ_ENTRY(phy_desc) phy_desc_list;
};

struct thunder_mdio_softc {
	device_t	dev;
	struct mtx	mtx;
	struct resource *	reg_base;
	enum thunder_mdio_mode	mode;
	TAILQ_HEAD(,phy_desc) phy_desc_head;
};

int thunder_mdio_attach(device_t);

#endif
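
The methods registered in thunder_mdio_methods[] implement the LMAC kobj interface declared through lmac_if.h, which is how an LMAC consumer (for example the BGX driver) talks to this MDIO driver. The sketch below is illustrative only and is not part of the patch: it assumes the LMAC_PHY_CONNECT, LMAC_MEDIA_STATUS and LMAC_PHY_DISCONNECT accessor macros generated from lmac_if.m mirror the method signatures shown above, and example_poll_link(), mdio_dev, lmacid and phyaddr are hypothetical names used only for the example.

/*
 * Illustrative sketch (not part of this patch): query link state of one
 * LMAC through the MDIO driver's LMAC interface.  Assumes the LMAC_*
 * macros generated from lmac_if.m follow the method signatures registered
 * in thunder_mdio_methods[]; mdio_dev, lmacid and phyaddr are hypothetical
 * values owned by the caller (e.g. the BGX driver).
 */
static void
example_poll_link(device_t mdio_dev, int lmacid, int phyaddr)
{
	int link, duplex, speed;

	/* Attach a PHY on this LMAC once; creates the fake ifp and miibus. */
	if (LMAC_PHY_CONNECT(mdio_dev, lmacid, phyaddr) != 0)
		return;

	/* Periodically query link/duplex/speed; backed by mii_tick(). */
	if (LMAC_MEDIA_STATUS(mdio_dev, lmacid, &link, &duplex, &speed) == 0 &&
	    link != 0)
		printf("LMAC%d link up: %d Mbps, %s duplex\n", lmacid, speed,
		    duplex ? "full" : "half");

	/* Tear down when the LMAC goes away. */
	LMAC_PHY_DISCONNECT(mdio_dev, lmacid, phyaddr);
}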